diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index c603d2b0a8..1b45705860 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -1,34 +1,6 @@ -/*- - * BSD LICENSE - * - * Copyright 2015 6WIND S.A. - * Copyright 2015 Mellanox. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of 6WIND S.A. nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright 2015 6WIND S.A. + * Copyright 2015 Mellanox. */ #include @@ -52,7 +24,7 @@ #include #include -#include +#include #include #include #include @@ -63,122 +35,7 @@ #include "mlx5_utils.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" - -/* Initialization data for hash RX queues.
*/ -const struct hash_rxq_init hash_rxq_init[] = { - [HASH_RXQ_TCPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP, - .flow_priority = 0, - .flow_spec.tcp_udp = { - .type = IBV_FLOW_SPEC_TCP, - .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], - }, - [HASH_RXQ_UDPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP, - .flow_priority = 0, - .flow_spec.tcp_udp = { - .type = IBV_FLOW_SPEC_UDP, - .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_IPV4], - }, - [HASH_RXQ_IPV4] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV4 | - IBV_RX_HASH_DST_IPV4), - .dpdk_rss_hf = (ETH_RSS_IPV4 | - ETH_RSS_FRAG_IPV4), - .flow_priority = 1, - .flow_spec.ipv4 = { - .type = IBV_FLOW_SPEC_IPV4, - .size = sizeof(hash_rxq_init[0].flow_spec.ipv4), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_ETH], - }, - [HASH_RXQ_TCPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_TCP | - IBV_RX_HASH_DST_PORT_TCP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP, - .flow_priority = 0, - .flow_spec.tcp_udp = { - .type = IBV_FLOW_SPEC_TCP, - .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_IPV6], - }, - [HASH_RXQ_UDPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6 | - IBV_RX_HASH_SRC_PORT_UDP | - IBV_RX_HASH_DST_PORT_UDP), - .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP, - .flow_priority = 0, - .flow_spec.tcp_udp = { - .type = IBV_FLOW_SPEC_UDP, - .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_IPV6], - }, - [HASH_RXQ_IPV6] = { - .hash_fields = (IBV_RX_HASH_SRC_IPV6 | - IBV_RX_HASH_DST_IPV6), - .dpdk_rss_hf = (ETH_RSS_IPV6 | - ETH_RSS_FRAG_IPV6), - .flow_priority = 1, - .flow_spec.ipv6 = { - .type = IBV_FLOW_SPEC_IPV6, - .size = sizeof(hash_rxq_init[0].flow_spec.ipv6), - }, - .underlayer = &hash_rxq_init[HASH_RXQ_ETH], - }, - [HASH_RXQ_ETH] = { - .hash_fields = 0, - .dpdk_rss_hf = 0, - .flow_priority = 2, - .flow_spec.eth = { - .type = IBV_FLOW_SPEC_ETH, - .size = sizeof(hash_rxq_init[0].flow_spec.eth), - }, - .underlayer = NULL, - }, -}; - -/* Number of entries in hash_rxq_init[]. */ -const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init); - -/* Initialization data for hash RX queue indirection tables. */ -static const struct ind_table_init ind_table_init[] = { - { - .max_size = -1u, /* Superseded by HW limitations. */ - .hash_types = - 1 << HASH_RXQ_TCPV4 | - 1 << HASH_RXQ_UDPV4 | - 1 << HASH_RXQ_IPV4 | - 1 << HASH_RXQ_TCPV6 | - 1 << HASH_RXQ_UDPV6 | - 1 << HASH_RXQ_IPV6 | - 0, - .hash_types_n = 6, - }, - { - .max_size = 1, - .hash_types = 1 << HASH_RXQ_ETH, - .hash_types_n = 1, - }, -}; - -#define IND_TABLE_INIT_N RTE_DIM(ind_table_init) +#include "mlx5_glue.h" /* Default RSS hash key also used for ConnectX-3. */ uint8_t rss_hash_default_key[] = { @@ -197,360 +54,6 @@ uint8_t rss_hash_default_key[] = { /* Length of the default RSS hash key. */ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key); -/** - * Populate flow steering rule for a given hash RX queue type using - * information from hash_rxq_init[]. Nothing is written to flow_attr when - * flow_attr_size is not large enough, but the required size is still returned. 
- * - * @param priv - * Pointer to private structure. - * @param[out] flow_attr - * Pointer to flow attribute structure to fill. Note that the allocated - * area must be larger and large enough to hold all flow specifications. - * @param flow_attr_size - * Entire size of flow_attr and trailing room for flow specifications. - * @param type - * Hash RX queue type to use for flow steering rule. - * - * @return - * Total size of the flow attribute buffer. No errors are defined. - */ -size_t -priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr, - size_t flow_attr_size, enum hash_rxq_type type) -{ - size_t offset = sizeof(*flow_attr); - const struct hash_rxq_init *init = &hash_rxq_init[type]; - - assert(priv != NULL); - assert((size_t)type < RTE_DIM(hash_rxq_init)); - do { - offset += init->flow_spec.hdr.size; - init = init->underlayer; - } while (init != NULL); - if (offset > flow_attr_size) - return offset; - flow_attr_size = offset; - init = &hash_rxq_init[type]; - *flow_attr = (struct ibv_flow_attr){ - .type = IBV_FLOW_ATTR_NORMAL, - /* Priorities < 3 are reserved for flow director. */ - .priority = init->flow_priority + 3, - .num_of_specs = 0, - .port = priv->port, - .flags = 0, - }; - do { - offset -= init->flow_spec.hdr.size; - memcpy((void *)((uintptr_t)flow_attr + offset), - &init->flow_spec, - init->flow_spec.hdr.size); - ++flow_attr->num_of_specs; - init = init->underlayer; - } while (init != NULL); - return flow_attr_size; -} - -/** - * Convert hash type position in indirection table initializer to - * hash RX queue type. - * - * @param table - * Indirection table initializer. - * @param pos - * Hash type position. - * - * @return - * Hash RX queue type. - */ -static enum hash_rxq_type -hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos) -{ - enum hash_rxq_type type = HASH_RXQ_TCPV4; - - assert(pos < table->hash_types_n); - do { - if ((table->hash_types & (1 << type)) && (pos-- == 0)) - break; - ++type; - } while (1); - return type; -} - -/** - * Filter out disabled hash RX queue types from ind_table_init[]. - * - * @param priv - * Pointer to private structure. - * @param[out] table - * Output table. - * - * @return - * Number of table entries. - */ -static unsigned int -priv_make_ind_table_init(struct priv *priv, - struct ind_table_init (*table)[IND_TABLE_INIT_N]) -{ - uint64_t rss_hf; - unsigned int i; - unsigned int j; - unsigned int table_n = 0; - /* Mandatory to receive frames not handled by normal hash RX queues. */ - unsigned int hash_types_sup = 1 << HASH_RXQ_ETH; - - rss_hf = priv->rss_hf; - /* Process other protocols only if more than one queue. */ - if (priv->rxqs_n > 1) - for (i = 0; (i != hash_rxq_init_n); ++i) - if (rss_hf & hash_rxq_init[i].dpdk_rss_hf) - hash_types_sup |= (1 << i); - - /* Filter out entries whose protocols are not in the set. */ - for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) { - unsigned int nb; - unsigned int h; - - /* j is increased only if the table has valid protocols. */ - assert(j <= i); - (*table)[j] = ind_table_init[i]; - (*table)[j].hash_types &= hash_types_sup; - for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h) - if (((*table)[j].hash_types >> h) & 0x1) - ++nb; - (*table)[i].hash_types_n = nb; - if (nb) { - ++table_n; - ++j; - } - } - return table_n; -} - -/** - * Initialize hash RX queues and indirection table. - * - * @param priv - * Pointer to private structure. - * - * @return - * 0 on success, errno value on failure. 
- */ -int -priv_create_hash_rxqs(struct priv *priv) -{ - struct ibv_wq *wqs[priv->reta_idx_n]; - struct ind_table_init ind_table_init[IND_TABLE_INIT_N]; - unsigned int ind_tables_n = - priv_make_ind_table_init(priv, &ind_table_init); - unsigned int hash_rxqs_n = 0; - struct hash_rxq (*hash_rxqs)[] = NULL; - struct ibv_rwq_ind_table *(*ind_tables)[] = NULL; - unsigned int i; - unsigned int j; - unsigned int k; - int err = 0; - - assert(priv->ind_tables == NULL); - assert(priv->ind_tables_n == 0); - assert(priv->hash_rxqs == NULL); - assert(priv->hash_rxqs_n == 0); - assert(priv->pd != NULL); - assert(priv->ctx != NULL); - if (priv->isolated) - return 0; - if (priv->rxqs_n == 0) - return EINVAL; - assert(priv->rxqs != NULL); - if (ind_tables_n == 0) { - ERROR("all hash RX queue types have been filtered out," - " indirection table cannot be created"); - return EINVAL; - } - if (priv->rxqs_n & (priv->rxqs_n - 1)) { - INFO("%u RX queues are configured, consider rounding this" - " number to the next power of two for better balancing", - priv->rxqs_n); - DEBUG("indirection table extended to assume %u WQs", - priv->reta_idx_n); - } - for (i = 0; (i != priv->reta_idx_n); ++i) { - struct mlx5_rxq_ctrl *rxq_ctrl; - - rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]], - struct mlx5_rxq_ctrl, rxq); - wqs[i] = rxq_ctrl->ibv->wq; - } - /* Get number of hash RX queues to configure. */ - for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i) - hash_rxqs_n += ind_table_init[i].hash_types_n; - DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables", - hash_rxqs_n, priv->rxqs_n, ind_tables_n); - /* Create indirection tables. */ - ind_tables = rte_calloc(__func__, ind_tables_n, - sizeof((*ind_tables)[0]), 0); - if (ind_tables == NULL) { - err = ENOMEM; - ERROR("cannot allocate indirection tables container: %s", - strerror(err)); - goto error; - } - for (i = 0; (i != ind_tables_n); ++i) { - struct ibv_rwq_ind_table_init_attr ind_init_attr = { - .log_ind_tbl_size = 0, /* Set below. */ - .ind_tbl = wqs, - .comp_mask = 0, - }; - unsigned int ind_tbl_size = ind_table_init[i].max_size; - struct ibv_rwq_ind_table *ind_table; - - if (priv->reta_idx_n < ind_tbl_size) - ind_tbl_size = priv->reta_idx_n; - ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size); - errno = 0; - ind_table = ibv_create_rwq_ind_table(priv->ctx, - &ind_init_attr); - if (ind_table != NULL) { - (*ind_tables)[i] = ind_table; - continue; - } - /* Not clear whether errno is set. */ - err = (errno ? errno : EINVAL); - ERROR("RX indirection table creation failed with error %d: %s", - err, strerror(err)); - goto error; - } - /* Allocate array that holds hash RX queues and related data. */ - hash_rxqs = rte_calloc(__func__, hash_rxqs_n, - sizeof((*hash_rxqs)[0]), 0); - if (hash_rxqs == NULL) { - err = ENOMEM; - ERROR("cannot allocate hash RX queues container: %s", - strerror(err)); - goto error; - } - for (i = 0, j = 0, k = 0; - ((i != hash_rxqs_n) && (j != ind_tables_n)); - ++i) { - struct hash_rxq *hash_rxq = &(*hash_rxqs)[i]; - enum hash_rxq_type type = - hash_rxq_type_from_pos(&ind_table_init[j], k); - struct rte_eth_rss_conf *priv_rss_conf = - (*priv->rss_conf)[type]; - struct ibv_rx_hash_conf hash_conf = { - .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ, - .rx_hash_key_len = (priv_rss_conf ? - priv_rss_conf->rss_key_len : - rss_hash_default_key_len), - .rx_hash_key = (priv_rss_conf ? 
- priv_rss_conf->rss_key : - rss_hash_default_key), - .rx_hash_fields_mask = hash_rxq_init[type].hash_fields, - }; - struct ibv_qp_init_attr_ex qp_init_attr = { - .qp_type = IBV_QPT_RAW_PACKET, - .comp_mask = (IBV_QP_INIT_ATTR_PD | - IBV_QP_INIT_ATTR_IND_TABLE | - IBV_QP_INIT_ATTR_RX_HASH), - .rx_hash_conf = hash_conf, - .rwq_ind_tbl = (*ind_tables)[j], - .pd = priv->pd, - }; - - DEBUG("using indirection table %u for hash RX queue %u type %d", - j, i, type); - *hash_rxq = (struct hash_rxq){ - .priv = priv, - .qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr), - .type = type, - }; - if (hash_rxq->qp == NULL) { - err = (errno ? errno : EINVAL); - ERROR("Hash RX QP creation failure: %s", - strerror(err)); - goto error; - } - if (++k < ind_table_init[j].hash_types_n) - continue; - /* Switch to the next indirection table and reset hash RX - * queue type array index. */ - ++j; - k = 0; - } - priv->ind_tables = ind_tables; - priv->ind_tables_n = ind_tables_n; - priv->hash_rxqs = hash_rxqs; - priv->hash_rxqs_n = hash_rxqs_n; - assert(err == 0); - return 0; -error: - if (hash_rxqs != NULL) { - for (i = 0; (i != hash_rxqs_n); ++i) { - struct ibv_qp *qp = (*hash_rxqs)[i].qp; - - if (qp == NULL) - continue; - claim_zero(ibv_destroy_qp(qp)); - } - rte_free(hash_rxqs); - } - if (ind_tables != NULL) { - for (j = 0; (j != ind_tables_n); ++j) { - struct ibv_rwq_ind_table *ind_table = - (*ind_tables)[j]; - - if (ind_table == NULL) - continue; - claim_zero(ibv_destroy_rwq_ind_table(ind_table)); - } - rte_free(ind_tables); - } - return err; -} - -/** - * Clean up hash RX queues and indirection table. - * - * @param priv - * Pointer to private structure. - */ -void -priv_destroy_hash_rxqs(struct priv *priv) -{ - unsigned int i; - - DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n); - if (priv->hash_rxqs_n == 0) { - assert(priv->hash_rxqs == NULL); - assert(priv->ind_tables == NULL); - return; - } - for (i = 0; (i != priv->hash_rxqs_n); ++i) { - struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i]; - unsigned int j, k; - - assert(hash_rxq->priv == priv); - assert(hash_rxq->qp != NULL); - for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j) - for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k) - assert(hash_rxq->mac_flow[j][k] == NULL); - claim_zero(ibv_destroy_qp(hash_rxq->qp)); - } - priv->hash_rxqs_n = 0; - rte_free(priv->hash_rxqs); - priv->hash_rxqs = NULL; - for (i = 0; (i != priv->ind_tables_n); ++i) { - struct ibv_rwq_ind_table *ind_table = - (*priv->ind_tables)[i]; - - assert(ind_table != NULL); - claim_zero(ibv_destroy_rwq_ind_table(ind_table)); - } - priv->ind_tables_n = 0; - rte_free(priv->ind_tables); - priv->ind_tables = NULL; -} - /** * Allocate RX queue elements. * @@ -558,7 +61,7 @@ priv_destroy_hash_rxqs(struct priv *priv) * Pointer to RX queue structure. * * @return - * 0 on success, errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) @@ -566,7 +69,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int elts_n = 1 << rxq_ctrl->rxq.elts_n; unsigned int i; - int ret = 0; + int err; /* Iterate on segments. 
*/ for (i = 0; (i != elts_n); ++i) { @@ -574,8 +77,9 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { - ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); - ret = ENOMEM; + DRV_LOG(ERR, "port %u empty mbuf pool", + rxq_ctrl->priv->dev->data->port_id); + rte_errno = ENOMEM; goto error; } /* Headroom is reserved by rte_pktmbuf_alloc(). */ @@ -594,7 +98,7 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) (*rxq_ctrl->rxq.elts)[i] = buf; } /* If Rx vector is activated. */ - if (rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { + if (mlx5_rxq_check_vec_support(&rxq_ctrl->rxq) > 0) { struct mlx5_rxq_data *rxq = &rxq_ctrl->rxq; struct rte_mbuf *mbuf_init = &rxq->fake_mbuf; int j; @@ -615,20 +119,24 @@ rxq_alloc_elts(struct mlx5_rxq_ctrl *rxq_ctrl) for (j = 0; j < MLX5_VPMD_DESCS_PER_LOOP; ++j) (*rxq->elts)[elts_n + j] = &rxq->fake_mbuf; } - DEBUG("%p: allocated and configured %u segments (max %u packets)", - (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); - assert(ret == 0); + DRV_LOG(DEBUG, + "port %u Rx queue %u allocated and configured %u segments" + " (max %u packets)", + rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx, elts_n, + elts_n / (1 << rxq_ctrl->rxq.sges_n)); return 0; error: + err = rte_errno; /* Save rte_errno before cleanup. */ elts_n = i; for (i = 0; (i != elts_n); ++i) { if ((*rxq_ctrl->rxq.elts)[i] != NULL) rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); (*rxq_ctrl->rxq.elts)[i] = NULL; } - DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); - assert(ret > 0); - return ret; + DRV_LOG(DEBUG, "port %u Rx queue %u failed, freed everything", + rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); + rte_errno = err; /* Restore rte_errno. */ + return -rte_errno; } /** @@ -646,14 +154,15 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) uint16_t used = q_n - (rxq->rq_ci - rxq->rq_pi); uint16_t i; - DEBUG("%p: freeing WRs", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx queue %u freeing WRs", + rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); if (rxq->elts == NULL) return; /** * Some mbuf in the Ring belongs to the application. They cannot be * freed. */ - if (rxq_check_vec_support(rxq) > 0) { + if (mlx5_rxq_check_vec_support(rxq) > 0) { for (i = 0; i < used; ++i) (*rxq->elts)[(rxq->rq_ci + i) & q_mask] = NULL; rxq->rq_pi = rxq->rq_ci; @@ -676,12 +185,83 @@ rxq_free_elts(struct mlx5_rxq_ctrl *rxq_ctrl) void mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) { - DEBUG("cleaning up %p", (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u cleaning up Rx queue %u", + rxq_ctrl->priv->dev->data->port_id, rxq_ctrl->idx); if (rxq_ctrl->ibv) - mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } +/** + * Returns the per-queue supported offloads. + * + * @param dev + * Pointer to Ethernet device. + * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx5_get_rx_queue_offloads(struct rte_eth_dev *dev) +{ + struct priv *priv = dev->data->dev_private; + struct mlx5_dev_config *config = &priv->config; + uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER | + DEV_RX_OFFLOAD_TIMESTAMP | + DEV_RX_OFFLOAD_JUMBO_FRAME); + + if (config->hw_fcs_strip) + offloads |= DEV_RX_OFFLOAD_CRC_STRIP; + if (config->hw_csum) + offloads |= (DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM); + if (config->hw_vlan_strip) + offloads |= DEV_RX_OFFLOAD_VLAN_STRIP; + return offloads; +} + + +/** + * Returns the per-port supported offloads. 
+ * + * @return + * Supported Rx offloads. + */ +uint64_t +mlx5_get_rx_port_offloads(void) +{ + uint64_t offloads = DEV_RX_OFFLOAD_VLAN_FILTER; + + return offloads; +} + +/** + * Checks if the per-queue offload configuration is valid. + * + * @param dev + * Pointer to Ethernet device. + * @param offloads + * Per-queue offloads configuration. + * + * @return + * 1 if the configuration is valid, 0 otherwise. + */ +static int +mlx5_is_rx_queue_offloads_allowed(struct rte_eth_dev *dev, uint64_t offloads) +{ + uint64_t port_offloads = dev->data->dev_conf.rxmode.offloads; + uint64_t queue_supp_offloads = mlx5_get_rx_queue_offloads(dev); + uint64_t port_supp_offloads = mlx5_get_rx_port_offloads(); + + if ((offloads & (queue_supp_offloads | port_supp_offloads)) != + offloads) + return 0; + if (((port_offloads ^ offloads) & port_supp_offloads)) + return 0; + return 1; +} + /** * * @param dev @@ -698,7 +278,7 @@ mlx5_rxq_cleanup(struct mlx5_rxq_ctrl *rxq_ctrl) * Memory pool for buffer allocations. * * @return - * 0 on success, negative errno value on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, @@ -709,46 +289,52 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, struct mlx5_rxq_data *rxq = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); - int ret = 0; - (void)conf; - if (mlx5_is_secondary()) - return -E_RTE_SECONDARY; - priv_lock(priv); if (!rte_is_power_of_2(desc)) { desc = 1 << log2above(desc); - WARN("%p: increased number of descriptors in RX queue %u" - " to the next power of two (%d)", - (void *)dev, idx, desc); + DRV_LOG(WARNING, + "port %u increased number of descriptors in Rx queue %u" + " to the next power of two (%d)", + dev->data->port_id, idx, desc); } - DEBUG("%p: configuring queue %u for %u descriptors", - (void *)dev, idx, desc); + DRV_LOG(DEBUG, "port %u configuring Rx queue %u for %u descriptors", + dev->data->port_id, idx, desc); if (idx >= priv->rxqs_n) { - ERROR("%p: queue index out of range (%u >= %u)", - (void *)dev, idx, priv->rxqs_n); - priv_unlock(priv); - return -EOVERFLOW; + DRV_LOG(ERR, "port %u Rx queue index out of range (%u >= %u)", + dev->data->port_id, idx, priv->rxqs_n); + rte_errno = EOVERFLOW; + return -rte_errno; + } + if (!mlx5_is_rx_queue_offloads_allowed(dev, conf->offloads)) { + DRV_LOG(ERR, + "port %u Rx queue offloads 0x%" PRIx64 " don't match" + " port offloads 0x%" PRIx64 " or supported offloads 0x%" + PRIx64, + dev->data->port_id, conf->offloads, + dev->data->dev_conf.rxmode.offloads, + (mlx5_get_rx_port_offloads() | + mlx5_get_rx_queue_offloads(dev))); + rte_errno = ENOTSUP; + return -rte_errno; } - if (!mlx5_priv_rxq_releasable(priv, idx)) { - ret = EBUSY; - ERROR("%p: unable to release queue index %u", - (void *)dev, idx); - goto out; + if (!mlx5_rxq_releasable(dev, idx)) { + DRV_LOG(ERR, "port %u unable to release queue index %u", + dev->data->port_id, idx); + rte_errno = EBUSY; + return -rte_errno; } - mlx5_priv_rxq_release(priv, idx); - rxq_ctrl = mlx5_priv_rxq_new(priv, idx, desc, socket, mp); + mlx5_rxq_release(dev, idx); + rxq_ctrl = mlx5_rxq_new(dev, idx, desc, socket, conf, mp); if (!rxq_ctrl) { - ERROR("%p: unable to allocate queue index %u", - (void *)dev, idx); - ret = ENOMEM; - goto out; + DRV_LOG(ERR, "port %u unable to allocate queue index %u", + dev->data->port_id, idx); + rte_errno = ENOMEM; + return -rte_errno; } - DEBUG("%p: adding RX queue 
%p to list", - (void *)dev, (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u adding Rx queue %u to list", + dev->data->port_id, idx); (*priv->rxqs)[idx] = &rxq_ctrl->rxq; -out: - priv_unlock(priv); - return -ret; + return 0; } /** @@ -764,53 +350,52 @@ mlx5_rx_queue_release(void *dpdk_rxq) struct mlx5_rxq_ctrl *rxq_ctrl; struct priv *priv; - if (mlx5_is_secondary()) - return; - if (rxq == NULL) return; rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq); priv = rxq_ctrl->priv; - priv_lock(priv); - if (!mlx5_priv_rxq_releasable(priv, rxq_ctrl->rxq.stats.idx)) - rte_panic("Rx queue %p is still used by a flow and cannot be" - " removed\n", (void *)rxq_ctrl); - mlx5_priv_rxq_release(priv, rxq_ctrl->rxq.stats.idx); - priv_unlock(priv); + if (!mlx5_rxq_releasable(priv->dev, rxq_ctrl->rxq.stats.idx)) + rte_panic("port %u Rx queue %u is still used by a flow and" + " cannot be removed\n", priv->dev->data->port_id, + rxq_ctrl->idx); + mlx5_rxq_release(priv->dev, rxq_ctrl->rxq.stats.idx); } /** * Allocate queue vector and fill epoll fd list for Rx interrupts. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int -priv_rx_intr_vec_enable(struct priv *priv) +mlx5_rx_intr_vec_enable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; unsigned int n = RTE_MIN(rxqs_n, (uint32_t)RTE_MAX_RXTX_INTR_VEC_ID); unsigned int count = 0; struct rte_intr_handle *intr_handle = priv->dev->intr_handle; - assert(!mlx5_is_secondary()); if (!priv->dev->data->dev_conf.intr_conf.rxq) return 0; - priv_rx_intr_vec_disable(priv); - intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n])); + mlx5_rx_intr_vec_disable(dev); + intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0])); if (intr_handle->intr_vec == NULL) { - ERROR("failed to allocate memory for interrupt vector," - " Rx interrupts will not be supported"); - return -ENOMEM; + DRV_LOG(ERR, + "port %u failed to allocate memory for interrupt" + " vector, Rx interrupts will not be supported", + dev->data->port_id); + rte_errno = ENOMEM; + return -rte_errno; } intr_handle->type = RTE_INTR_HANDLE_EXT; for (i = 0; i != n; ++i) { /* This rxq ibv must not be released in this function. 
*/ - struct mlx5_rxq_ibv *rxq_ibv = mlx5_priv_rxq_ibv_get(priv, i); + struct mlx5_rxq_ibv *rxq_ibv = mlx5_rxq_ibv_get(dev, i); int fd; int flags; int rc; @@ -824,27 +409,34 @@ priv_rx_intr_vec_enable(struct priv *priv) continue; } if (count >= RTE_MAX_RXTX_INTR_VEC_ID) { - ERROR("too many Rx queues for interrupt vector size" - " (%d), Rx interrupts cannot be enabled", - RTE_MAX_RXTX_INTR_VEC_ID); - priv_rx_intr_vec_disable(priv); - return -1; + DRV_LOG(ERR, + "port %u too many Rx queues for interrupt" + " vector size (%d), Rx interrupts cannot be" + " enabled", + dev->data->port_id, RTE_MAX_RXTX_INTR_VEC_ID); + mlx5_rx_intr_vec_disable(dev); + rte_errno = ENOMEM; + return -rte_errno; } fd = rxq_ibv->channel->fd; flags = fcntl(fd, F_GETFL); rc = fcntl(fd, F_SETFL, flags | O_NONBLOCK); if (rc < 0) { - ERROR("failed to make Rx interrupt file descriptor" - " %d non-blocking for queue index %d", fd, i); - priv_rx_intr_vec_disable(priv); - return -1; + rte_errno = errno; + DRV_LOG(ERR, + "port %u failed to make Rx interrupt file" + " descriptor %d non-blocking for queue index" + " %d", + dev->data->port_id, fd, i); + mlx5_rx_intr_vec_disable(dev); + return -rte_errno; } intr_handle->intr_vec[i] = RTE_INTR_VEC_RXTX_OFFSET + count; intr_handle->efds[count] = fd; count++; } if (!count) - priv_rx_intr_vec_disable(priv); + mlx5_rx_intr_vec_disable(dev); else intr_handle->nb_efd = count; return 0; @@ -853,12 +445,13 @@ priv_rx_intr_vec_enable(struct priv *priv) /** * Clean up Rx interrupts handler. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. */ void -priv_rx_intr_vec_disable(struct priv *priv) +mlx5_rx_intr_vec_disable(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct rte_intr_handle *intr_handle = priv->dev->intr_handle; unsigned int i; unsigned int rxqs_n = priv->rxqs_n; @@ -866,6 +459,8 @@ priv_rx_intr_vec_disable(struct priv *priv) if (!priv->dev->data->dev_conf.intr_conf.rxq) return; + if (!intr_handle->intr_vec) + goto free; for (i = 0; i != n; ++i) { struct mlx5_rxq_ctrl *rxq_ctrl; struct mlx5_rxq_data *rxq_data; @@ -879,10 +474,12 @@ priv_rx_intr_vec_disable(struct priv *priv) */ rxq_data = (*priv->rxqs)[i]; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); - mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv); + mlx5_rxq_ibv_release(rxq_ctrl->ibv); } +free: rte_intr_free_epoll_fd(intr_handle); - free(intr_handle->intr_vec); + if (intr_handle->intr_vec) + free(intr_handle->intr_vec); intr_handle->nb_efd = 0; intr_handle->intr_vec = NULL; } @@ -908,7 +505,6 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) doorbell = (uint64_t)doorbell_hi << 32; doorbell |= rxq->cqn; rxq->cq_db[MLX5_CQ_ARM_DB] = rte_cpu_to_be_32(doorbell_hi); - rte_wmb(); rte_write64(rte_cpu_to_be_64(doorbell), cq_db_reg); } @@ -921,39 +517,33 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq) * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ int mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; - int ret = 0; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->irq) { struct mlx5_rxq_ibv *rxq_ibv; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } mlx5_arm_cq(rxq_data, rxq_data->cq_arm_sn); - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); + mlx5_rxq_ibv_release(rxq_ibv); } -exit: - priv_unlock(priv); - if (ret) - WARN("unable to arm interrupt on rx queue %d", rx_queue_id); - return -ret; + return 0; } /** @@ -965,70 +555,74 @@ exit: * Rx queue number. * * @return - * 0 on success, negative on failure. + * 0 on success, a negative errno value otherwise and rte_errno is set. */ int mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - struct priv *priv = mlx5_get_priv(dev); + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data; struct mlx5_rxq_ctrl *rxq_ctrl; struct mlx5_rxq_ibv *rxq_ibv = NULL; struct ibv_cq *ev_cq; void *ev_ctx; - int ret = 0; + int ret; - priv_lock(priv); rxq_data = (*priv->rxqs)[rx_queue_id]; if (!rxq_data) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (!rxq_ctrl->irq) - goto exit; - rxq_ibv = mlx5_priv_rxq_ibv_get(priv, rx_queue_id); + return 0; + rxq_ibv = mlx5_rxq_ibv_get(dev, rx_queue_id); if (!rxq_ibv) { - ret = EINVAL; - goto exit; + rte_errno = EINVAL; + return -rte_errno; } - ret = ibv_get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); + ret = mlx5_glue->get_cq_event(rxq_ibv->channel, &ev_cq, &ev_ctx); if (ret || ev_cq != rxq_ibv->cq) { - ret = EINVAL; + rte_errno = EINVAL; goto exit; } rxq_data->cq_arm_sn++; - ibv_ack_cq_events(rxq_ibv->cq, 1); + mlx5_glue->ack_cq_events(rxq_ibv->cq, 1); + return 0; exit: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (rxq_ibv) - mlx5_priv_rxq_ibv_release(priv, rxq_ibv); - priv_unlock(priv); - if (ret) - WARN("unable to disable interrupt on rx queue %d", - rx_queue_id); - return -ret; + mlx5_rxq_ibv_release(rxq_ibv); + DRV_LOG(WARNING, "port %u unable to disable interrupt on Rx queue %d", + dev->data->port_id, rx_queue_id); + rte_errno = ret; /* Restore rte_errno. */ + return -rte_errno; } /** * Create the Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return - * The Verbs object initialised if it can be created. + * The Verbs object initialised, NULL otherwise and rte_errno is set. 
*/ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_new(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); struct ibv_wq_attr mod; union { - struct ibv_cq_init_attr_ex cq; + struct { + struct ibv_cq_init_attr_ex ibv; + struct mlx5dv_cq_init_attr mlx5; + } cq; struct ibv_wq_init_attr wq; struct ibv_cq_ex cq_attr; } attr; @@ -1039,56 +633,78 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) unsigned int i; int ret = 0; struct mlx5dv_obj obj; + struct mlx5_dev_config *config = &priv->config; assert(rxq_data); assert(!rxq_ctrl->ibv); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_RX_QUEUE; + priv->verbs_alloc_ctx.obj = rxq_ctrl; tmpl = rte_calloc_socket(__func__, 1, sizeof(*tmpl), 0, rxq_ctrl->socket); if (!tmpl) { - ERROR("%p: cannot allocate verbs resources", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u cannot allocate verbs resources", + dev->data->port_id, rxq_ctrl->idx); + rte_errno = ENOMEM; goto error; } tmpl->rxq_ctrl = rxq_ctrl; /* Use the entire RX mempool as the memory region. */ - tmpl->mr = priv_mr_get(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_get(dev, rxq_data->mp); if (!tmpl->mr) { - tmpl->mr = priv_mr_new(priv, rxq_data->mp); + tmpl->mr = mlx5_mr_new(dev, rxq_data->mp); if (!tmpl->mr) { - ERROR("%p: MR creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u: memeroy region creation failure", + dev->data->port_id); goto error; } } if (rxq_ctrl->irq) { - tmpl->channel = ibv_create_comp_channel(priv->ctx); + tmpl->channel = mlx5_glue->create_comp_channel(priv->ctx); if (!tmpl->channel) { - ERROR("%p: Comp Channel creation failure", - (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u: comp channel creation failure", + dev->data->port_id); + rte_errno = ENOMEM; goto error; } } - attr.cq = (struct ibv_cq_init_attr_ex){ + attr.cq.ibv = (struct ibv_cq_init_attr_ex){ + .cqe = cqe_n, + .channel = tmpl->channel, .comp_mask = 0, }; - if (priv->cqe_comp) { - attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS; - attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; + attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){ + .comp_mask = 0, + }; + if (config->cqe_comp && !rxq_data->hw_timestamp) { + attr.cq.mlx5.comp_mask |= + MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE; + attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH; /* * For vectorized Rx, it must not be doubled in order to * make cq_ci and rq_ci aligned. 
*/ - if (rxq_check_vec_support(rxq_data) < 0) - cqe_n *= 2; + if (mlx5_rxq_check_vec_support(rxq_data) < 0) + attr.cq.ibv.cqe *= 2; + } else if (config->cqe_comp && rxq_data->hw_timestamp) { + DRV_LOG(DEBUG, + "port %u Rx CQE compression is disabled for HW" + " timestamp", + dev->data->port_id); } - tmpl->cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl->channel, 0); + tmpl->cq = mlx5_glue->cq_ex_to_cq + (mlx5_glue->dv_create_cq(priv->ctx, &attr.cq.ibv, + &attr.cq.mlx5)); if (tmpl->cq == NULL) { - ERROR("%p: CQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u CQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } - DEBUG("priv->device_attr.max_qp_wr is %d", - priv->device_attr.orig_attr.max_qp_wr); - DEBUG("priv->device_attr.max_sge is %d", - priv->device_attr.orig_attr.max_sge); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_qp_wr is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_qp_wr); + DRV_LOG(DEBUG, "port %u priv->device_attr.max_sge is %d", + dev->data->port_id, priv->device_attr.orig_attr.max_sge); attr.wq = (struct ibv_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ .wq_type = IBV_WQT_RQ, @@ -1111,14 +727,16 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } #ifdef HAVE_IBV_WQ_FLAG_RX_END_PADDING - if (priv->hw_padding) { + if (config->hw_padding) { attr.wq.create_flags |= IBV_WQ_FLAG_RX_END_PADDING; attr.wq.comp_mask |= IBV_WQ_INIT_ATTR_FLAGS; } #endif - tmpl->wq = ibv_create_wq(priv->ctx, &attr.wq); + tmpl->wq = mlx5_glue->create_wq(priv->ctx, &attr.wq); if (tmpl->wq == NULL) { - ERROR("%p: WQ creation failure", (void *)rxq_ctrl); + DRV_LOG(ERR, "port %u Rx queue %u WQ creation failure", + dev->data->port_id, idx); + rte_errno = ENOMEM; goto error; } /* @@ -1128,11 +746,14 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) if (((int)attr.wq.max_wr != ((1 << rxq_data->elts_n) >> rxq_data->sges_n)) || ((int)attr.wq.max_sge != (1 << rxq_data->sges_n))) { - ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs", - (void *)rxq_ctrl, - ((1 << rxq_data->elts_n) >> rxq_data->sges_n), - (1 << rxq_data->sges_n), - attr.wq.max_wr, attr.wq.max_sge); + DRV_LOG(ERR, + "port %u Rx queue %u requested %u*%u but got %u*%u" + " WRs*SGEs", + dev->data->port_id, idx, + ((1 << rxq_data->elts_n) >> rxq_data->sges_n), + (1 << rxq_data->sges_n), + attr.wq.max_wr, attr.wq.max_sge); + rte_errno = EINVAL; goto error; } /* Change queue state to ready. 
*/ @@ -1140,22 +761,29 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) .attr_mask = IBV_WQ_ATTR_STATE, .wq_state = IBV_WQS_RDY, }; - ret = ibv_modify_wq(tmpl->wq, &mod); + ret = mlx5_glue->modify_wq(tmpl->wq, &mod); if (ret) { - ERROR("%p: WQ state to IBV_WQS_RDY failed", - (void *)rxq_ctrl); + DRV_LOG(ERR, + "port %u Rx queue %u WQ state to IBV_WQS_RDY failed", + dev->data->port_id, idx); + rte_errno = ret; goto error; } obj.cq.in = tmpl->cq; obj.cq.out = &cq_info; obj.rwq.in = tmpl->wq; obj.rwq.out = &rwq; - ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); - if (ret != 0) + ret = mlx5_glue->dv_init_obj(&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ); + if (ret) { + rte_errno = ret; goto error; + } if (cq_info.cqe_size != RTE_CACHE_LINE_SIZE) { - ERROR("Wrong MLX5_CQE_SIZE environment variable value: " - "it should be set to %u", RTE_CACHE_LINE_SIZE); + DRV_LOG(ERR, + "port %u wrong MLX5_CQE_SIZE environment variable" + " value: it should be set to %u", + dev->data->port_id, RTE_CACHE_LINE_SIZE); + rte_errno = EINVAL; goto error; } /* Fill the rings. */ @@ -1184,42 +812,51 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx) }; rxq_data->cq_db = cq_info.dbrec; rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf; + rxq_data->cq_uar = cq_info.cq_uar; + rxq_data->cqn = cq_info.cqn; + rxq_data->cq_arm_sn = 0; /* Update doorbell counter. */ rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n; rte_wmb(); *rxq_data->rq_db = rte_cpu_to_be_32(rxq_data->rq_ci); - DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); + DRV_LOG(DEBUG, "port %u rxq %u updated with %p", dev->data->port_id, + idx, (void *)&tmpl); rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + dev->data->port_id, idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsibv, tmpl, next); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; return tmpl; error: + ret = rte_errno; /* Save rte_errno before cleanup. */ if (tmpl->wq) - claim_zero(ibv_destroy_wq(tmpl->wq)); + claim_zero(mlx5_glue->destroy_wq(tmpl->wq)); if (tmpl->cq) - claim_zero(ibv_destroy_cq(tmpl->cq)); + claim_zero(mlx5_glue->destroy_cq(tmpl->cq)); if (tmpl->channel) - claim_zero(ibv_destroy_comp_channel(tmpl->channel)); + claim_zero(mlx5_glue->destroy_comp_channel(tmpl->channel)); if (tmpl->mr) - priv_mr_release(priv, tmpl->mr); + mlx5_mr_release(tmpl->mr); + priv->verbs_alloc_ctx.type = MLX5_VERBS_ALLOC_TYPE_NONE; + rte_errno = ret; /* Restore rte_errno. */ return NULL; } /** * Get an Rx queue Verbs object. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * Queue index in DPDK Rx queue array * * @return * The Verbs object if it exists. 
*/ -struct mlx5_rxq_ibv* -mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ibv * +mlx5_rxq_ibv_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_data *rxq_data = (*priv->rxqs)[idx]; struct mlx5_rxq_ctrl *rxq_ctrl; @@ -1229,11 +866,11 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) return NULL; rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); if (rxq_ctrl->ibv) { - priv_mr_get(priv, rxq_data->mp); + mlx5_mr_get(dev, rxq_data->mp); rte_atomic32_inc(&rxq_ctrl->ibv->refcnt); - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl->ibv, - rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + dev->data->port_id, rxq_ctrl->idx, + rte_atomic32_read(&rxq_ctrl->ibv->refcnt)); } return rxq_ctrl->ibv; } @@ -1241,16 +878,14 @@ mlx5_priv_rxq_ibv_get(struct priv *priv, uint16_t idx) /** * Release an Rx verbs queue object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. */ int -mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_release(struct mlx5_rxq_ibv *rxq_ibv) { int ret; @@ -1258,41 +893,45 @@ mlx5_priv_rxq_ibv_release(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) assert(rxq_ibv->wq); assert(rxq_ibv->cq); assert(rxq_ibv->mr); - ret = priv_mr_release(priv, rxq_ibv->mr); + ret = mlx5_mr_release(rxq_ibv->mr); if (!ret) rxq_ibv->mr = NULL; - DEBUG("%p: Verbs Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ibv, rte_atomic32_read(&rxq_ibv->refcnt)); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u: refcnt %d", + rxq_ibv->rxq_ctrl->priv->dev->data->port_id, + rxq_ibv->rxq_ctrl->idx, rte_atomic32_read(&rxq_ibv->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ibv->refcnt)) { rxq_free_elts(rxq_ibv->rxq_ctrl); - claim_zero(ibv_destroy_wq(rxq_ibv->wq)); - claim_zero(ibv_destroy_cq(rxq_ibv->cq)); + claim_zero(mlx5_glue->destroy_wq(rxq_ibv->wq)); + claim_zero(mlx5_glue->destroy_cq(rxq_ibv->cq)); if (rxq_ibv->channel) - claim_zero(ibv_destroy_comp_channel(rxq_ibv->channel)); + claim_zero(mlx5_glue->destroy_comp_channel + (rxq_ibv->channel)); LIST_REMOVE(rxq_ibv, next); rte_free(rxq_ibv); return 0; } - return EBUSY; + return 1; } /** * Verify the Verbs Rx queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. */ int -mlx5_priv_rxq_ibv_verify(struct priv *priv) +mlx5_rxq_ibv_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; int ret = 0; struct mlx5_rxq_ibv *rxq_ibv; LIST_FOREACH(rxq_ibv, &priv->rxqsibv, next) { - DEBUG("%p: Verbs Rx queue %p still referenced", (void *)priv, - (void *)rxq_ibv); + DRV_LOG(DEBUG, "port %u Verbs Rx queue %u still referenced", + dev->data->port_id, rxq_ibv->rxq_ctrl->idx); ++ret; } return ret; @@ -1301,15 +940,12 @@ mlx5_priv_rxq_ibv_verify(struct priv *priv) /** * Return true if a single reference exists on the object. * - * @param priv - * Pointer to private structure. * @param rxq_ibv * Verbs Rx queue object. 
*/ int -mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) +mlx5_rxq_ibv_releasable(struct mlx5_rxq_ibv *rxq_ibv) { - (void)priv; assert(rxq_ibv); return (rte_atomic32_read(&rxq_ibv->refcnt) == 1); } @@ -1317,8 +953,8 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) /** * Create a DPDK Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * @param desc @@ -1327,24 +963,33 @@ mlx5_priv_rxq_ibv_releasable(struct priv *priv, struct mlx5_rxq_ibv *rxq_ibv) * NUMA socket on which memory must be allocated. * * @return - * A DPDK queue object on success. + * A DPDK queue object on success, NULL otherwise and rte_errno is set. */ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, - unsigned int socket, struct rte_mempool *mp) +struct mlx5_rxq_ctrl * +mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, + unsigned int socket, const struct rte_eth_rxconf *conf, + struct rte_mempool *mp) { - struct rte_eth_dev *dev = priv->dev; + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *tmpl; - const uint16_t desc_n = - desc + priv->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + struct mlx5_dev_config *config = &priv->config; + /* + * Always allocate extra slots, even if eventually + * the vector Rx will not be used. + */ + const uint16_t desc_n = + desc + config->rx_vec_en * MLX5_VPMD_DESCS_PER_LOOP; tmpl = rte_calloc_socket("RXQ", 1, sizeof(*tmpl) + desc_n * sizeof(struct rte_mbuf *), 0, socket); - if (!tmpl) + if (!tmpl) { + rte_errno = ENOMEM; return NULL; + } + tmpl->socket = socket; if (priv->dev->data->dev_conf.intr_conf.rxq) tmpl->irq = 1; /* Enable scattered packets support for this queue if necessary. 
*/ @@ -1352,7 +997,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= (mb_len - RTE_PKTMBUF_HEADROOM)) { tmpl->rxq.sges_n = 0; - } else if (dev->data->dev_conf.rxmode.enable_scatter) { + } else if (conf->offloads & DEV_RX_OFFLOAD_SCATTER) { unsigned int size = RTE_PKTMBUF_HEADROOM + dev->data->dev_conf.rxmode.max_rx_pkt_len; @@ -1368,59 +1013,65 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, size = mb_len * (1 << tmpl->rxq.sges_n); size -= RTE_PKTMBUF_HEADROOM; if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { - ERROR("%p: too many SGEs (%u) needed to handle" - " requested maximum packet size %u", - (void *)dev, - 1 << sges_n, - dev->data->dev_conf.rxmode.max_rx_pkt_len); + DRV_LOG(ERR, + "port %u too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + dev->data->port_id, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + rte_errno = EOVERFLOW; goto error; } } else { - WARN("%p: the requested maximum Rx packet size (%u) is" - " larger than a single mbuf (%u) and scattered" - " mode has not been requested", - (void *)dev, - dev->data->dev_conf.rxmode.max_rx_pkt_len, - mb_len - RTE_PKTMBUF_HEADROOM); + DRV_LOG(WARNING, + "port %u the requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered mode has" + " not been requested", + dev->data->port_id, + dev->data->dev_conf.rxmode.max_rx_pkt_len, + mb_len - RTE_PKTMBUF_HEADROOM); } - DEBUG("%p: maximum number of segments per packet: %u", - (void *)dev, 1 << tmpl->rxq.sges_n); + DRV_LOG(DEBUG, "port %u maximum number of segments per packet: %u", + dev->data->port_id, 1 << tmpl->rxq.sges_n); if (desc % (1 << tmpl->rxq.sges_n)) { - ERROR("%p: number of RX queue descriptors (%u) is not a" - " multiple of SGEs per packet (%u)", - (void *)dev, - desc, - 1 << tmpl->rxq.sges_n); + DRV_LOG(ERR, + "port %u number of Rx queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + dev->data->port_id, + desc, + 1 << tmpl->rxq.sges_n); + rte_errno = EINVAL; goto error; } /* Toggle RX checksum offload if hardware supports it. */ - if (priv->hw_csum) - tmpl->rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - if (priv->hw_csum_l2tun) - tmpl->rxq.csum_l2tun = - !!dev->data->dev_conf.rxmode.hw_ip_checksum; + tmpl->rxq.csum = !!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM); + tmpl->rxq.csum_l2tun = (!!(conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) && + priv->config.tunnel_en); + tmpl->rxq.hw_timestamp = !!(conf->offloads & DEV_RX_OFFLOAD_TIMESTAMP); /* Configure VLAN stripping. */ - tmpl->rxq.vlan_strip = (priv->hw_vlan_strip && - !!dev->data->dev_conf.rxmode.hw_vlan_strip); + tmpl->rxq.vlan_strip = !!(conf->offloads & DEV_RX_OFFLOAD_VLAN_STRIP); /* By default, FCS (CRC) is stripped by hardware. 
*/ - if (dev->data->dev_conf.rxmode.hw_strip_crc) { + if (conf->offloads & DEV_RX_OFFLOAD_CRC_STRIP) { tmpl->rxq.crc_present = 0; - } else if (priv->hw_fcs_strip) { + } else if (config->hw_fcs_strip) { tmpl->rxq.crc_present = 1; } else { - WARN("%p: CRC stripping has been disabled but will still" - " be performed by hardware, make sure MLNX_OFED and" - " firmware are up to date", - (void *)dev); + DRV_LOG(WARNING, + "port %u CRC stripping has been disabled but will" + " still be performed by hardware, make sure MLNX_OFED" + " and firmware are up to date", + dev->data->port_id); tmpl->rxq.crc_present = 0; } - DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" - " incoming frames to hide it", - (void *)dev, - tmpl->rxq.crc_present ? "disabled" : "enabled", - tmpl->rxq.crc_present << 2); + DRV_LOG(DEBUG, + "port %u CRC stripping is %s, %u bytes will be subtracted from" + " incoming frames to hide it", + dev->data->port_id, + tmpl->rxq.crc_present ? "disabled" : "enabled", + tmpl->rxq.crc_present << 2); /* Save port ID. */ - tmpl->rxq.rss_hash = priv->rxqs_n > 1; + tmpl->rxq.rss_hash = !!priv->rss_conf.rss_hf && + (!!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS)); tmpl->rxq.port_id = dev->data->port_id; tmpl->priv = priv; tmpl->rxq.mp = mp; @@ -1428,9 +1079,10 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc, tmpl->rxq.elts_n = log2above(desc); tmpl->rxq.elts = (struct rte_mbuf *(*)[1 << tmpl->rxq.elts_n])(tmpl + 1); + tmpl->idx = idx; rte_atomic32_inc(&tmpl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)tmpl, rte_atomic32_read(&tmpl->refcnt)); + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, + idx, rte_atomic32_read(&tmpl->refcnt)); LIST_INSERT_HEAD(&priv->rxqsctrl, tmpl, next); return tmpl; error: @@ -1441,28 +1093,29 @@ error: /** * Get a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * A pointer to the queue if it exists. + * A pointer to the queue if it exists, NULL otherwise. */ -struct mlx5_rxq_ctrl* -mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) +struct mlx5_rxq_ctrl * +mlx5_rxq_get(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl = NULL; if ((*priv->rxqs)[idx]) { rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); - - mlx5_priv_rxq_ibv_get(priv, idx); + mlx5_rxq_ibv_get(dev, idx); rte_atomic32_inc(&rxq_ctrl->refcnt); - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", + dev->data->port_id, rxq_ctrl->idx, + rte_atomic32_read(&rxq_ctrl->refcnt)); } return rxq_ctrl; } @@ -1470,59 +1123,59 @@ mlx5_priv_rxq_get(struct priv *priv, uint16_t idx) /** * Release a Rx queue. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
*/ int -mlx5_priv_rxq_release(struct priv *priv, uint16_t idx) +mlx5_rxq_release(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; if (!(*priv->rxqs)[idx]) return 0; rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); assert(rxq_ctrl->priv); - if (rxq_ctrl->ibv) { - int ret; - - ret = mlx5_priv_rxq_ibv_release(rxq_ctrl->priv, rxq_ctrl->ibv); - if (!ret) - rxq_ctrl->ibv = NULL; - } - DEBUG("%p: Rx queue %p: refcnt %d", (void *)priv, - (void *)rxq_ctrl, rte_atomic32_read(&rxq_ctrl->refcnt)); + if (rxq_ctrl->ibv && !mlx5_rxq_ibv_release(rxq_ctrl->ibv)) + rxq_ctrl->ibv = NULL; + DRV_LOG(DEBUG, "port %u Rx queue %u: refcnt %d", dev->data->port_id, + rxq_ctrl->idx, rte_atomic32_read(&rxq_ctrl->refcnt)); if (rte_atomic32_dec_and_test(&rxq_ctrl->refcnt)) { LIST_REMOVE(rxq_ctrl, next); rte_free(rxq_ctrl); (*priv->rxqs)[idx] = NULL; return 0; } - return EBUSY; + return 1; } /** * Verify if the queue can be released. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param idx * TX queue index. * * @return - * 1 if the queue can be released. + * 1 if the queue can be released, negative errno otherwise and rte_errno is + * set. */ int -mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) +mlx5_rxq_releasable(struct rte_eth_dev *dev, uint16_t idx) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; - if (!(*priv->rxqs)[idx]) - return -1; + if (!(*priv->rxqs)[idx]) { + rte_errno = EINVAL; + return -rte_errno; + } rxq_ctrl = container_of((*priv->rxqs)[idx], struct mlx5_rxq_ctrl, rxq); return (rte_atomic32_read(&rxq_ctrl->refcnt) == 1); } @@ -1530,20 +1183,22 @@ mlx5_priv_rxq_releasable(struct priv *priv, uint16_t idx) /** * Verify the Rx Queue list is empty * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * - * @return the number of object not released. + * @return + * The number of object not released. */ int -mlx5_priv_rxq_verify(struct priv *priv) +mlx5_rxq_verify(struct rte_eth_dev *dev) { + struct priv *priv = dev->data->dev_private; struct mlx5_rxq_ctrl *rxq_ctrl; int ret = 0; LIST_FOREACH(rxq_ctrl, &priv->rxqsctrl, next) { - DEBUG("%p: Rx Queue %p still referenced", (void *)priv, - (void *)rxq_ctrl); + DRV_LOG(DEBUG, "port %u Rx Queue %u still referenced", + dev->data->port_id, rxq_ctrl->idx); ++ret; } return ret; @@ -1552,35 +1207,37 @@ mlx5_priv_rxq_verify(struct priv *priv) /** * Create an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n * Number of queues in the array. * * @return - * A new indirection table. + * The Verbs object initialised, NULL otherwise and rte_errno is set. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_new(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; const unsigned int wq_n = rte_is_power_of_2(queues_n) ? 
log2above(queues_n) : - priv->ind_table_max_size; + log2above(priv->config.ind_table_max_size); struct ibv_wq *wq[1 << wq_n]; unsigned int i; unsigned int j; ind_tbl = rte_calloc(__func__, 1, sizeof(*ind_tbl) + queues_n * sizeof(uint16_t), 0); - if (!ind_tbl) + if (!ind_tbl) { + rte_errno = ENOMEM; return NULL; + } for (i = 0; i != queues_n; ++i) { - struct mlx5_rxq_ctrl *rxq = - mlx5_priv_rxq_get(priv, queues[i]); + struct mlx5_rxq_ctrl *rxq = mlx5_rxq_get(dev, queues[i]); if (!rxq) goto error; @@ -1591,31 +1248,35 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[], /* Finalise indirection table. */ for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j) wq[i] = wq[j]; - ind_tbl->ind_table = ibv_create_rwq_ind_table( - priv->ctx, - &(struct ibv_rwq_ind_table_init_attr){ + ind_tbl->ind_table = mlx5_glue->create_rwq_ind_table + (priv->ctx, + &(struct ibv_rwq_ind_table_init_attr){ .log_ind_tbl_size = wq_n, .ind_tbl = wq, .comp_mask = 0, - }); - if (!ind_tbl->ind_table) + }); + if (!ind_tbl->ind_table) { + rte_errno = errno; goto error; + } rte_atomic32_inc(&ind_tbl->refcnt); LIST_INSERT_HEAD(&priv->ind_tbls, ind_tbl, next); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); + DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", + dev->data->port_id, (void *)ind_tbl, + rte_atomic32_read(&ind_tbl->refcnt)); return ind_tbl; error: rte_free(ind_tbl); - DEBUG("%p cannot create indirection table", (void *)priv); + DRV_LOG(DEBUG, "port %u cannot create indirection table", + dev->data->port_id); return NULL; } /** * Get an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param queues * Queues entering in the indirection table. * @param queues_n @@ -1624,10 +1285,11 @@ error: * @return * An indirection table if found. */ -struct mlx5_ind_table_ibv* -mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], - uint16_t queues_n) +struct mlx5_ind_table_ibv * +mlx5_ind_table_ibv_get(struct rte_eth_dev *dev, uint16_t queues[], + uint16_t queues_n) { + struct priv *priv = dev->data->dev_private; struct mlx5_ind_table_ibv *ind_tbl; LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) { @@ -1641,10 +1303,11 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], unsigned int i; rte_atomic32_inc(&ind_tbl->refcnt); - DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv, - (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt)); + DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d", + dev->data->port_id, (void *)ind_tbl, + rte_atomic32_read(&ind_tbl->refcnt)); for (i = 0; i != ind_tbl->queues_n; ++i) - mlx5_priv_rxq_get(priv, ind_tbl->queues[i]); + mlx5_rxq_get(dev, ind_tbl->queues[i]); } return ind_tbl; } @@ -1652,51 +1315,56 @@ mlx5_priv_ind_table_ibv_get(struct priv *priv, uint16_t queues[], /** * Release an indirection table. * - * @param priv - * Pointer to private structure. + * @param dev + * Pointer to Ethernet device. * @param ind_table * Indirection table to release. * * @return - * 0 on success, errno value on failure. + * 1 while a reference on it exists, 0 when freed. 
  */
 int
-mlx5_priv_ind_table_ibv_release(struct priv *priv,
-				struct mlx5_ind_table_ibv *ind_tbl)
+mlx5_ind_table_ibv_release(struct rte_eth_dev *dev,
+			   struct mlx5_ind_table_ibv *ind_tbl)
 {
 	unsigned int i;
 
-	DEBUG("%p: Indirection table %p: refcnt %d", (void *)priv,
-	      (void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
+	DRV_LOG(DEBUG, "port %u indirection table %p: refcnt %d",
+		((struct priv *)dev->data->dev_private)->port,
+		(void *)ind_tbl, rte_atomic32_read(&ind_tbl->refcnt));
 	if (rte_atomic32_dec_and_test(&ind_tbl->refcnt))
-		claim_zero(ibv_destroy_rwq_ind_table(ind_tbl->ind_table));
+		claim_zero(mlx5_glue->destroy_rwq_ind_table
+			   (ind_tbl->ind_table));
 	for (i = 0; i != ind_tbl->queues_n; ++i)
-		claim_nonzero(mlx5_priv_rxq_release(priv, ind_tbl->queues[i]));
+		claim_nonzero(mlx5_rxq_release(dev, ind_tbl->queues[i]));
 	if (!rte_atomic32_read(&ind_tbl->refcnt)) {
 		LIST_REMOVE(ind_tbl, next);
 		rte_free(ind_tbl);
 		return 0;
 	}
-	return EBUSY;
+	return 1;
 }
 
 /**
  * Verify the Rx Queue list is empty
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  *
- * @return the number of object not released.
+ * @return
+ *   The number of object not released.
 */
 int
-mlx5_priv_ind_table_ibv_verify(struct priv *priv)
+mlx5_ind_table_ibv_verify(struct rte_eth_dev *dev)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_ind_table_ibv *ind_tbl;
 	int ret = 0;
 
 	LIST_FOREACH(ind_tbl, &priv->ind_tbls, next) {
-		DEBUG("%p: Verbs indirection table %p still referenced",
-		      (void *)priv, (void *)ind_tbl);
+		DRV_LOG(DEBUG,
+			"port %u Verbs indirection table %p still referenced",
+			dev->data->port_id, (void *)ind_tbl);
 		++ret;
 	}
 	return ret;
@@ -1705,8 +1373,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
 /**
  * Create an Rx Hash queue.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  * @param rss_key
 *   RSS key for the Rx hash queue.
 * @param rss_key_len
@@ -1714,29 +1382,35 @@
 * @param hash_fields
 *   Verbs protocol hash field to make the RSS on.
 * @param queues
- *   Queues entering in hash queue.
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
- *   An hash Rx queue on success.
+ *   The Verbs object initialised, NULL otherwise and rte_errno is set.
 */
-struct mlx5_hrxq*
-mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
-		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_new(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
	      uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
 	struct mlx5_ind_table_ibv *ind_tbl;
 	struct ibv_qp *qp;
+	int err;
 
-	ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
-	if (!ind_tbl)
-		ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
+	queues_n = hash_fields ? queues_n : 1;
+	ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
 	if (!ind_tbl)
+		ind_tbl = mlx5_ind_table_ibv_new(dev, queues, queues_n);
+	if (!ind_tbl) {
+		rte_errno = ENOMEM;
 		return NULL;
-	qp = ibv_create_qp_ex(
-		priv->ctx,
-		&(struct ibv_qp_init_attr_ex){
+	}
+	qp = mlx5_glue->create_qp_ex
+		(priv->ctx,
+		 &(struct ibv_qp_init_attr_ex){
 			.qp_type = IBV_QPT_RAW_PACKET,
 			.comp_mask =
 				IBV_QP_INIT_ATTR_PD |
@@ -1750,9 +1424,11 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
 			},
 			.rwq_ind_tbl = ind_tbl->ind_table,
 			.pd = priv->pd,
-		});
-	if (!qp)
+		 });
+	if (!qp) {
+		rte_errno = errno;
 		goto error;
+	}
 	hrxq = rte_calloc(__func__, 1, sizeof(*hrxq) + rss_key_len, 0);
 	if (!hrxq)
 		goto error;
@@ -1763,37 +1439,43 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
 	memcpy(hrxq->rss_key, rss_key, rss_key_len);
 	rte_atomic32_inc(&hrxq->refcnt);
 	LIST_INSERT_HEAD(&priv->hrxqs, hrxq, next);
-	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
-	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+		dev->data->port_id, (void *)hrxq,
+		rte_atomic32_read(&hrxq->refcnt));
 	return hrxq;
 error:
-	mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+	err = rte_errno; /* Save rte_errno before cleanup. */
+	mlx5_ind_table_ibv_release(dev, ind_tbl);
 	if (qp)
-		claim_zero(ibv_destroy_qp(qp));
+		claim_zero(mlx5_glue->destroy_qp(qp));
+	rte_errno = err; /* Restore rte_errno. */
 	return NULL;
 }
 
 /**
  * Get an Rx Hash queue.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
 * @param rss_conf
 *   RSS configuration for the Rx hash queue.
 * @param queues
- *   Queues entering in hash queue.
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
 * @param queues_n
 *   Number of queues.
 *
 * @return
 *   An hash Rx queue on success.
 */
-struct mlx5_hrxq*
-mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
-		   uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
+struct mlx5_hrxq *
+mlx5_hrxq_get(struct rte_eth_dev *dev, uint8_t *rss_key, uint8_t rss_key_len,
	      uint64_t hash_fields, uint16_t queues[], uint16_t queues_n)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
 
+	queues_n = hash_fields ? queues_n : 1;
 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
 		struct mlx5_ind_table_ibv *ind_tbl;
 
@@ -1803,16 +1485,17 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
 			continue;
 		if (hrxq->hash_fields != hash_fields)
 			continue;
-		ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
+		ind_tbl = mlx5_ind_table_ibv_get(dev, queues, queues_n);
 		if (!ind_tbl)
 			continue;
 		if (ind_tbl != hrxq->ind_table) {
-			mlx5_priv_ind_table_ibv_release(priv, ind_tbl);
+			mlx5_ind_table_ibv_release(dev, ind_tbl);
 			continue;
 		}
 		rte_atomic32_inc(&hrxq->refcnt);
-		DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
-		      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+		DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+			dev->data->port_id, (void *)hrxq,
+			rte_atomic32_read(&hrxq->refcnt));
 		return hrxq;
 	}
 	return NULL;
@@ -1821,47 +1504,51 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
 /**
  * Release the hash Rx queue.
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
 * @param hrxq
 *   Pointer to Hash Rx queue to release.
  *
  * @return
- *   0 on success, errno value on failure.
+ *   1 while a reference on it exists, 0 when freed.
  */
 int
-mlx5_priv_hrxq_release(struct priv *priv, struct mlx5_hrxq *hrxq)
+mlx5_hrxq_release(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
 {
-	DEBUG("%p: Hash Rx queue %p: refcnt %d", (void *)priv,
-	      (void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
+	DRV_LOG(DEBUG, "port %u hash Rx queue %p: refcnt %d",
+		((struct priv *)dev->data->dev_private)->port,
+		(void *)hrxq, rte_atomic32_read(&hrxq->refcnt));
 	if (rte_atomic32_dec_and_test(&hrxq->refcnt)) {
-		claim_zero(ibv_destroy_qp(hrxq->qp));
-		mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table);
+		claim_zero(mlx5_glue->destroy_qp(hrxq->qp));
+		mlx5_ind_table_ibv_release(dev, hrxq->ind_table);
 		LIST_REMOVE(hrxq, next);
 		rte_free(hrxq);
 		return 0;
 	}
-	claim_nonzero(mlx5_priv_ind_table_ibv_release(priv, hrxq->ind_table));
-	return EBUSY;
+	claim_nonzero(mlx5_ind_table_ibv_release(dev, hrxq->ind_table));
+	return 1;
 }
 
 /**
  * Verify the Rx Queue list is empty
  *
- * @param priv
- *   Pointer to private structure.
+ * @param dev
+ *   Pointer to Ethernet device.
  *
- * @return the number of object not released.
+ * @return
+ *   The number of object not released.
 */
 int
-mlx5_priv_hrxq_ibv_verify(struct priv *priv)
+mlx5_hrxq_ibv_verify(struct rte_eth_dev *dev)
 {
+	struct priv *priv = dev->data->dev_private;
 	struct mlx5_hrxq *hrxq;
 	int ret = 0;
 
 	LIST_FOREACH(hrxq, &priv->hrxqs, next) {
-		DEBUG("%p: Verbs Hash Rx queue %p still referenced",
-		      (void *)priv, (void *)hrxq);
+		DRV_LOG(DEBUG,
+			"port %u Verbs hash Rx queue %p still referenced",
+			dev->data->port_id, (void *)hrxq);
 		++ret;
 	}
 	return ret;
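
Note on the indirection table sizing seen in mlx5_ind_table_ibv_new() above: the work-queue array is rounded up to a power of two and filled by repeating the configured queues. The standalone sketch below illustrates only that wrap-around fill; is_power_of_2(), log2_above() and MAX_LOG are stand-ins invented for the example (MAX_LOG plays the role of log2 of ind_table_max_size) and are not part of the patch.

/*
 * Standalone illustration (not part of the patch) of the wrap-around fill
 * used by mlx5_ind_table_ibv_new(): when the queue count is not a power of
 * two, the table takes its maximum size and the queues are repeated until
 * it is full.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LOG 3 /* hypothetical maximum: 1 << 3 = 8 entries */

static int is_power_of_2(unsigned int n)
{
	return n && !(n & (n - 1));
}

static unsigned int log2_above(unsigned int n)
{
	unsigned int l = 0;

	while ((1u << l) < n)
		++l;
	return l;
}

int main(void)
{
	uint16_t queues[] = { 0, 1, 2 }; /* three Rx queues, not a power of two */
	const unsigned int queues_n = 3;
	const unsigned int wq_n = is_power_of_2(queues_n) ?
			log2_above(queues_n) : MAX_LOG;
	uint16_t wq[1 << wq_n];
	unsigned int i;
	unsigned int j;

	/* Copy the real queue indices first. */
	for (i = 0; i != queues_n; ++i)
		wq[i] = queues[i];
	/* Then wrap around until the power-of-two table is full. */
	for (j = 0; i != (unsigned int)(1 << wq_n); ++i, ++j)
		wq[i] = wq[j];
	for (i = 0; i != (unsigned int)(1 << wq_n); ++i)
		printf("entry %u -> queue %u\n", i, wq[i]);
	return 0;
}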
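The hash Rx queue helpers follow a get-or-create pattern under the reworked error convention: creation returns NULL with rte_errno set, release returns 1 while another reference remains and 0 once the object is freed, and an empty hash_fields value narrows the indirection table to the first queue index. The sketch below shows how PMD-internal code might use them; example_hrxq_acquire() and example_hrxq_drop() are hypothetical helpers, not functions added by this patch, and the includes assume the prototypes from mlx5_rxtx.h plus the default RSS key exported by this file.

/*
 * Hypothetical caller-side sketch (not part of the patch).
 */
#include <string.h>

#include <rte_errno.h>

#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

static struct mlx5_hrxq *
example_hrxq_acquire(struct rte_eth_dev *dev, uint64_t hash_fields,
		     uint16_t queues[], uint16_t queues_n)
{
	struct mlx5_hrxq *hrxq;

	/* Reuse a matching hash Rx queue when one already exists... */
	hrxq = mlx5_hrxq_get(dev, rss_hash_default_key,
			     rss_hash_default_key_len, hash_fields,
			     queues, queues_n);
	if (hrxq)
		return hrxq;
	/* ...otherwise create it; on failure rte_errno is already set. */
	hrxq = mlx5_hrxq_new(dev, rss_hash_default_key,
			     rss_hash_default_key_len, hash_fields,
			     queues, queues_n);
	if (!hrxq)
		DRV_LOG(DEBUG, "port %u cannot allocate hash Rx queue: %s",
			dev->data->port_id, strerror(rte_errno));
	return hrxq;
}

static void
example_hrxq_drop(struct rte_eth_dev *dev, struct mlx5_hrxq *hrxq)
{
	/* 1 means another reference remains, 0 means the object was freed. */
	if (mlx5_hrxq_release(dev, hrxq))
		DRV_LOG(DEBUG, "port %u hash Rx queue still referenced",
			dev->data->port_id);
}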
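Finally, the *_verify() helpers converted above return the number of objects still referenced, which makes them usable as leak checks on a teardown path. A hypothetical sketch follows; the function name and log messages are illustrative only and the prototypes are assumed to come from mlx5_rxtx.h.

/*
 * Hypothetical teardown-time leak check (not part of the patch).
 */
#include "mlx5.h"
#include "mlx5_rxtx.h"
#include "mlx5_utils.h"

static void
example_rx_leak_check(struct rte_eth_dev *dev)
{
	int ret;

	ret = mlx5_hrxq_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some hash Rx queues still remain",
			dev->data->port_id);
	ret = mlx5_ind_table_ibv_verify(dev);
	if (ret)
		DRV_LOG(WARNING,
			"port %u some indirection tables still remain",
			dev->data->port_id);
	ret = mlx5_rxq_verify(dev);
	if (ret)
		DRV_LOG(WARNING, "port %u some Rx queues still remain",
			dev->data->port_id);
}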