diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d3cd58e3bb..a4cdd374a1 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -64,122 +64,6 @@
 #include "mlx5_autoconf.h"
 #include "mlx5_defs.h"
 
-/* Initialization data for hash RX queues. */
-const struct hash_rxq_init hash_rxq_init[] = {
-    [HASH_RXQ_TCPV4] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-                IBV_RX_HASH_DST_IPV4 |
-                IBV_RX_HASH_SRC_PORT_TCP |
-                IBV_RX_HASH_DST_PORT_TCP),
-        .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
-        .flow_priority = 0,
-        .flow_spec.tcp_udp = {
-            .type = IBV_FLOW_SPEC_TCP,
-            .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
-    },
-    [HASH_RXQ_UDPV4] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-                IBV_RX_HASH_DST_IPV4 |
-                IBV_RX_HASH_SRC_PORT_UDP |
-                IBV_RX_HASH_DST_PORT_UDP),
-        .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
-        .flow_priority = 0,
-        .flow_spec.tcp_udp = {
-            .type = IBV_FLOW_SPEC_UDP,
-            .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
-    },
-    [HASH_RXQ_IPV4] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV4 |
-                IBV_RX_HASH_DST_IPV4),
-        .dpdk_rss_hf = (ETH_RSS_IPV4 |
-                ETH_RSS_FRAG_IPV4),
-        .flow_priority = 1,
-        .flow_spec.ipv4 = {
-            .type = IBV_FLOW_SPEC_IPV4,
-            .size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
-    },
-    [HASH_RXQ_TCPV6] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-                IBV_RX_HASH_DST_IPV6 |
-                IBV_RX_HASH_SRC_PORT_TCP |
-                IBV_RX_HASH_DST_PORT_TCP),
-        .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
-        .flow_priority = 0,
-        .flow_spec.tcp_udp = {
-            .type = IBV_FLOW_SPEC_TCP,
-            .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
-    },
-    [HASH_RXQ_UDPV6] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-                IBV_RX_HASH_DST_IPV6 |
-                IBV_RX_HASH_SRC_PORT_UDP |
-                IBV_RX_HASH_DST_PORT_UDP),
-        .dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
-        .flow_priority = 0,
-        .flow_spec.tcp_udp = {
-            .type = IBV_FLOW_SPEC_UDP,
-            .size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
-    },
-    [HASH_RXQ_IPV6] = {
-        .hash_fields = (IBV_RX_HASH_SRC_IPV6 |
-                IBV_RX_HASH_DST_IPV6),
-        .dpdk_rss_hf = (ETH_RSS_IPV6 |
-                ETH_RSS_FRAG_IPV6),
-        .flow_priority = 1,
-        .flow_spec.ipv6 = {
-            .type = IBV_FLOW_SPEC_IPV6,
-            .size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
-        },
-        .underlayer = &hash_rxq_init[HASH_RXQ_ETH],
-    },
-    [HASH_RXQ_ETH] = {
-        .hash_fields = 0,
-        .dpdk_rss_hf = 0,
-        .flow_priority = 2,
-        .flow_spec.eth = {
-            .type = IBV_FLOW_SPEC_ETH,
-            .size = sizeof(hash_rxq_init[0].flow_spec.eth),
-        },
-        .underlayer = NULL,
-    },
-};
-
-/* Number of entries in hash_rxq_init[]. */
-const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
-
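A note on the `underlayer` pointers in the table removed above: each entry chains to its outer protocol down to the Ethernet root, and priv_flow_attr() (removed further below) walks that chain to size and stack the flow specifications. A minimal standalone sketch of the idea, with made-up sizes and simplified stand-in types rather than the driver's:

#include <stdio.h>

/* Simplified stand-in for hash_rxq_init[] entries; sizes are invented. */
struct layer {
    const char *name;
    unsigned int spec_size;         /* size of this layer's flow spec */
    const struct layer *underlayer; /* outer header, NULL at the root */
};

static const struct layer eth  = { "eth",  16, NULL };
static const struct layer ipv4 = { "ipv4", 40, &eth };
static const struct layer tcp4 = { "tcp4", 40, &ipv4 };

int main(void)
{
    /* Walk from the innermost type to the Ethernet root, the way
     * priv_flow_attr() sizes a steering rule before filling it. */
    unsigned int total = 0;

    for (const struct layer *l = &tcp4; l != NULL; l = l->underlayer) {
        printf("spec: %s (%u bytes)\n", l->name, l->spec_size);
        total += l->spec_size;
    }
    printf("total spec size: %u bytes\n", total);
    return 0;
}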
-/* Initialization data for hash RX queue indirection tables. */
-static const struct ind_table_init ind_table_init[] = {
-    {
-        .max_size = -1u, /* Superseded by HW limitations. */
-        .hash_types =
-            1 << HASH_RXQ_TCPV4 |
-            1 << HASH_RXQ_UDPV4 |
-            1 << HASH_RXQ_IPV4 |
-            1 << HASH_RXQ_TCPV6 |
-            1 << HASH_RXQ_UDPV6 |
-            1 << HASH_RXQ_IPV6 |
-            0,
-        .hash_types_n = 6,
-    },
-    {
-        .max_size = 1,
-        .hash_types = 1 << HASH_RXQ_ETH,
-        .hash_types_n = 1,
-    },
-};
-
-#define IND_TABLE_INIT_N RTE_DIM(ind_table_init)
-
 /* Default RSS hash key also used for ConnectX-3. */
 uint8_t rss_hash_default_key[] = {
     0x2c, 0xc6, 0x81, 0xd1,
@@ -197,423 +81,6 @@ uint8_t rss_hash_default_key[] = {
 /* Length of the default RSS hash key. */
 const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
 
-/**
- * Populate flow steering rule for a given hash RX queue type using
- * information from hash_rxq_init[]. Nothing is written to flow_attr when
- * flow_attr_size is not large enough, but the required size is still returned.
- *
- * @param priv
- *   Pointer to private structure.
- * @param[out] flow_attr
- *   Pointer to flow attribute structure to fill. Note that the allocated
- *   area must be large enough to hold all flow specifications.
- * @param flow_attr_size
- *   Entire size of flow_attr and trailing room for flow specifications.
- * @param type
- *   Hash RX queue type to use for flow steering rule.
- *
- * @return
- *   Total size of the flow attribute buffer. No errors are defined.
- */
-size_t
-priv_flow_attr(struct priv *priv, struct ibv_flow_attr *flow_attr,
-           size_t flow_attr_size, enum hash_rxq_type type)
-{
-    size_t offset = sizeof(*flow_attr);
-    const struct hash_rxq_init *init = &hash_rxq_init[type];
-
-    assert(priv != NULL);
-    assert((size_t)type < RTE_DIM(hash_rxq_init));
-    do {
-        offset += init->flow_spec.hdr.size;
-        init = init->underlayer;
-    } while (init != NULL);
-    if (offset > flow_attr_size)
-        return offset;
-    flow_attr_size = offset;
-    init = &hash_rxq_init[type];
-    *flow_attr = (struct ibv_flow_attr){
-        .type = IBV_FLOW_ATTR_NORMAL,
-        /* Priorities < 3 are reserved for flow director. */
-        .priority = init->flow_priority + 3,
-        .num_of_specs = 0,
-        .port = priv->port,
-        .flags = 0,
-    };
-    do {
-        offset -= init->flow_spec.hdr.size;
-        memcpy((void *)((uintptr_t)flow_attr + offset),
-               &init->flow_spec,
-               init->flow_spec.hdr.size);
-        ++flow_attr->num_of_specs;
-        init = init->underlayer;
-    } while (init != NULL);
-    return flow_attr_size;
-}
-
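The removed function above follows a query-then-fill convention: when the buffer is too small nothing is written and the required size is returned. A hypothetical caller, illustrative only; it assumes the driver declarations above are in scope, that <stdlib.h> is included, and that a NULL buffer is acceptable for the sizing pass since nothing is written when the size is insufficient:

/* Hypothetical helper, not driver code. */
static struct ibv_flow_attr *
alloc_flow_attr(struct priv *priv, enum hash_rxq_type type)
{
    /* First pass: 0 is never large enough, so this only reports the
     * size needed for the attribute plus all chained specs. */
    size_t size = priv_flow_attr(priv, NULL, 0, type);
    struct ibv_flow_attr *attr = calloc(1, size);

    /* Second pass: fill the buffer now that it is large enough. */
    if (attr != NULL)
        priv_flow_attr(priv, attr, size, type);
    return attr;
}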
-/**
- * Convert hash type position in indirection table initializer to
- * hash RX queue type.
- *
- * @param table
- *   Indirection table initializer.
- * @param pos
- *   Hash type position.
- *
- * @return
- *   Hash RX queue type.
- */
-static enum hash_rxq_type
-hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
-{
-    enum hash_rxq_type type = HASH_RXQ_TCPV4;
-
-    assert(pos < table->hash_types_n);
-    do {
-        if ((table->hash_types & (1 << type)) && (pos-- == 0))
-            break;
-        ++type;
-    } while (1);
-    return type;
-}
-
-/**
- * Filter out disabled hash RX queue types from ind_table_init[].
- *
- * @param priv
- *   Pointer to private structure.
- * @param[out] table
- *   Output table.
- *
- * @return
- *   Number of table entries.
- */
-static unsigned int
-priv_make_ind_table_init(struct priv *priv,
-             struct ind_table_init (*table)[IND_TABLE_INIT_N])
-{
-    uint64_t rss_hf;
-    unsigned int i;
-    unsigned int j;
-    unsigned int table_n = 0;
-    /* Mandatory to receive frames not handled by normal hash RX queues. */
-    unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
-
-    rss_hf = priv->rss_hf;
-    /* Process other protocols only if more than one queue. */
-    if (priv->rxqs_n > 1)
-        for (i = 0; (i != hash_rxq_init_n); ++i)
-            if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
-                hash_types_sup |= (1 << i);
-
-    /* Filter out entries whose protocols are not in the set. */
-    for (i = 0, j = 0; (i != IND_TABLE_INIT_N); ++i) {
-        unsigned int nb;
-        unsigned int h;
-
-        /* j is increased only if the table has valid protocols. */
-        assert(j <= i);
-        (*table)[j] = ind_table_init[i];
-        (*table)[j].hash_types &= hash_types_sup;
-        for (h = 0, nb = 0; (h != hash_rxq_init_n); ++h)
-            if (((*table)[j].hash_types >> h) & 0x1)
-                ++nb;
-        (*table)[j].hash_types_n = nb;
-        if (nb) {
-            ++table_n;
-            ++j;
-        }
-    }
-    return table_n;
-}
-
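hash_rxq_type_from_pos() above decodes a position that counts only the bits set in hash_types. A standalone model of that decoding, using plain unsigned types instead of the driver's enums:

#include <assert.h>
#include <stdio.h>

static unsigned int
type_from_pos(unsigned int hash_types, unsigned int pos)
{
    unsigned int type = 0;

    /* Skip over clear bits; pos counts down only on set bits. */
    for (;; ++type)
        if ((hash_types & (1u << type)) && (pos-- == 0))
            return type;
}

int main(void)
{
    /* Mask 0x25 = 0b100101: bits 0, 2 and 5 enabled. */
    unsigned int mask = 0x25;

    assert(type_from_pos(mask, 0) == 0);
    assert(type_from_pos(mask, 1) == 2);
    assert(type_from_pos(mask, 2) == 5);
    printf("pos 1 -> type %u\n", type_from_pos(mask, 1));
    return 0;
}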
-/**
- * Initialize hash RX queues and indirection table.
- *
- * @param priv
- *   Pointer to private structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_create_hash_rxqs(struct priv *priv)
-{
-    struct ibv_wq *wqs[priv->reta_idx_n];
-    struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
-    unsigned int ind_tables_n =
-        priv_make_ind_table_init(priv, &ind_table_init);
-    unsigned int hash_rxqs_n = 0;
-    struct hash_rxq (*hash_rxqs)[] = NULL;
-    struct ibv_rwq_ind_table *(*ind_tables)[] = NULL;
-    unsigned int i;
-    unsigned int j;
-    unsigned int k;
-    int err = 0;
-
-    assert(priv->ind_tables == NULL);
-    assert(priv->ind_tables_n == 0);
-    assert(priv->hash_rxqs == NULL);
-    assert(priv->hash_rxqs_n == 0);
-    assert(priv->pd != NULL);
-    assert(priv->ctx != NULL);
-    if (priv->isolated)
-        return 0;
-    if (priv->rxqs_n == 0)
-        return EINVAL;
-    assert(priv->rxqs != NULL);
-    if (ind_tables_n == 0) {
-        ERROR("all hash RX queue types have been filtered out,"
-              " indirection table cannot be created");
-        return EINVAL;
-    }
-    if (priv->rxqs_n & (priv->rxqs_n - 1)) {
-        INFO("%u RX queues are configured, consider rounding this"
-             " number to the next power of two for better balancing",
-             priv->rxqs_n);
-        DEBUG("indirection table extended to assume %u WQs",
-              priv->reta_idx_n);
-    }
-    for (i = 0; (i != priv->reta_idx_n); ++i) {
-        struct mlx5_rxq_ctrl *rxq_ctrl;
-
-        rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]],
-                    struct mlx5_rxq_ctrl, rxq);
-        wqs[i] = rxq_ctrl->ibv->wq;
-    }
-    /* Get number of hash RX queues to configure. */
-    for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
-        hash_rxqs_n += ind_table_init[i].hash_types_n;
-    DEBUG("allocating %u hash RX queues for %u WQs, %u indirection tables",
-          hash_rxqs_n, priv->rxqs_n, ind_tables_n);
-    /* Create indirection tables. */
-    ind_tables = rte_calloc(__func__, ind_tables_n,
-                sizeof((*ind_tables)[0]), 0);
-    if (ind_tables == NULL) {
-        err = ENOMEM;
-        ERROR("cannot allocate indirection tables container: %s",
-              strerror(err));
-        goto error;
-    }
-    for (i = 0; (i != ind_tables_n); ++i) {
-        struct ibv_rwq_ind_table_init_attr ind_init_attr = {
-            .log_ind_tbl_size = 0, /* Set below. */
-            .ind_tbl = wqs,
-            .comp_mask = 0,
-        };
-        unsigned int ind_tbl_size = ind_table_init[i].max_size;
-        struct ibv_rwq_ind_table *ind_table;
-
-        if (priv->reta_idx_n < ind_tbl_size)
-            ind_tbl_size = priv->reta_idx_n;
-        ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
-        errno = 0;
-        ind_table = ibv_create_rwq_ind_table(priv->ctx,
-                             &ind_init_attr);
-        if (ind_table != NULL) {
-            (*ind_tables)[i] = ind_table;
-            continue;
-        }
-        /* Not clear whether errno is set. */
-        err = (errno ? errno : EINVAL);
-        ERROR("RX indirection table creation failed with error %d: %s",
-              err, strerror(err));
-        goto error;
-    }
-    /* Allocate array that holds hash RX queues and related data. */
-    hash_rxqs = rte_calloc(__func__, hash_rxqs_n,
-                   sizeof((*hash_rxqs)[0]), 0);
-    if (hash_rxqs == NULL) {
-        err = ENOMEM;
-        ERROR("cannot allocate hash RX queues container: %s",
-              strerror(err));
-        goto error;
-    }
-    for (i = 0, j = 0, k = 0;
-         ((i != hash_rxqs_n) && (j != ind_tables_n));
-         ++i) {
-        struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
-        enum hash_rxq_type type =
-            hash_rxq_type_from_pos(&ind_table_init[j], k);
-        struct rte_eth_rss_conf *priv_rss_conf =
-            (*priv->rss_conf)[type];
-        struct ibv_rx_hash_conf hash_conf = {
-            .rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ,
-            .rx_hash_key_len = (priv_rss_conf ?
-                        priv_rss_conf->rss_key_len :
-                        rss_hash_default_key_len),
-            .rx_hash_key = (priv_rss_conf ?
-                    priv_rss_conf->rss_key :
-                    rss_hash_default_key),
-            .rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
-        };
-        struct ibv_qp_init_attr_ex qp_init_attr = {
-            .qp_type = IBV_QPT_RAW_PACKET,
-            .comp_mask = (IBV_QP_INIT_ATTR_PD |
-                      IBV_QP_INIT_ATTR_IND_TABLE |
-                      IBV_QP_INIT_ATTR_RX_HASH),
-            .rx_hash_conf = hash_conf,
-            .rwq_ind_tbl = (*ind_tables)[j],
-            .pd = priv->pd,
-        };
-
-        DEBUG("using indirection table %u for hash RX queue %u type %d",
-              j, i, type);
-        *hash_rxq = (struct hash_rxq){
-            .priv = priv,
-            .qp = ibv_create_qp_ex(priv->ctx, &qp_init_attr),
-            .type = type,
-        };
-        if (hash_rxq->qp == NULL) {
-            err = (errno ? errno : EINVAL);
-            ERROR("Hash RX QP creation failure: %s",
-                  strerror(err));
-            goto error;
-        }
-        if (++k < ind_table_init[j].hash_types_n)
-            continue;
-        /* Switch to the next indirection table and reset hash RX
-         * queue type array index. */
-        ++j;
-        k = 0;
-    }
-    priv->ind_tables = ind_tables;
-    priv->ind_tables_n = ind_tables_n;
-    priv->hash_rxqs = hash_rxqs;
-    priv->hash_rxqs_n = hash_rxqs_n;
-    assert(err == 0);
-    return 0;
-error:
-    if (hash_rxqs != NULL) {
-        for (i = 0; (i != hash_rxqs_n); ++i) {
-            struct ibv_qp *qp = (*hash_rxqs)[i].qp;
-
-            if (qp == NULL)
-                continue;
-            claim_zero(ibv_destroy_qp(qp));
-        }
-        rte_free(hash_rxqs);
-    }
-    if (ind_tables != NULL) {
-        for (j = 0; (j != ind_tables_n); ++j) {
-            struct ibv_rwq_ind_table *ind_table =
-                (*ind_tables)[j];
-
-            if (ind_table == NULL)
-                continue;
-            claim_zero(ibv_destroy_rwq_ind_table(ind_table));
-        }
-        rte_free(ind_tables);
-    }
-    return err;
-}
-
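The error path of priv_create_hash_rxqs() above follows the usual create-or-roll-back shape: build resources in order, and on failure destroy only what was actually created. A self-contained sketch of the pattern with stand-in make_resource()/free() calls, not driver code:

#include <errno.h>
#include <stdlib.h>

struct resource { unsigned int id; };

/* Stand-in creator: pretend creation fails at index 3. */
static struct resource *
make_resource(unsigned int i)
{
    struct resource *r = (i == 3) ? NULL : malloc(sizeof(*r));

    if (r != NULL)
        r->id = i;
    return r;
}

static int
create_all(struct resource **res, unsigned int n)
{
    unsigned int i;

    for (i = 0; i != n; ++i) {
        res[i] = make_resource(i);
        if (res[i] == NULL)
            goto error;
    }
    return 0;
error:
    /* Unwind only what was created, newest first. */
    while (i-- != 0)
        free(res[i]);
    return ENOMEM;
}

int main(void)
{
    struct resource *res[8] = { 0 };

    return create_all(res, 8) == ENOMEM ? 0 : 1;
}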
-/**
- * Clean up hash RX queues and indirection table.
- *
- * @param priv
- *   Pointer to private structure.
- */
-void
-priv_destroy_hash_rxqs(struct priv *priv)
-{
-    unsigned int i;
-
-    DEBUG("destroying %u hash RX queues", priv->hash_rxqs_n);
-    if (priv->hash_rxqs_n == 0) {
-        assert(priv->hash_rxqs == NULL);
-        assert(priv->ind_tables == NULL);
-        return;
-    }
-    for (i = 0; (i != priv->hash_rxqs_n); ++i) {
-        struct hash_rxq *hash_rxq = &(*priv->hash_rxqs)[i];
-        unsigned int j, k;
-
-        assert(hash_rxq->priv == priv);
-        assert(hash_rxq->qp != NULL);
-        /* Also check that there are no remaining flows. */
-        for (j = 0; (j != RTE_DIM(hash_rxq->special_flow)); ++j)
-            for (k = 0;
-                 (k != RTE_DIM(hash_rxq->special_flow[j]));
-                 ++k)
-                assert(hash_rxq->special_flow[j][k] == NULL);
-        for (j = 0; (j != RTE_DIM(hash_rxq->mac_flow)); ++j)
-            for (k = 0; (k != RTE_DIM(hash_rxq->mac_flow[j])); ++k)
-                assert(hash_rxq->mac_flow[j][k] == NULL);
-        claim_zero(ibv_destroy_qp(hash_rxq->qp));
-    }
-    priv->hash_rxqs_n = 0;
-    rte_free(priv->hash_rxqs);
-    priv->hash_rxqs = NULL;
-    for (i = 0; (i != priv->ind_tables_n); ++i) {
-        struct ibv_rwq_ind_table *ind_table =
-            (*priv->ind_tables)[i];
-
-        assert(ind_table != NULL);
-        claim_zero(ibv_destroy_rwq_ind_table(ind_table));
-    }
-    priv->ind_tables_n = 0;
-    rte_free(priv->ind_tables);
-    priv->ind_tables = NULL;
-}
-
-/**
- * Check whether a given flow type is allowed.
- *
- * @param priv
- *   Pointer to private structure.
- * @param type
- *   Flow type to check.
- *
- * @return
- *   Nonzero if the given flow type is allowed.
- */
-int
-priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
-{
-    (void)priv;
-    switch (type) {
-    case HASH_RXQ_FLOW_TYPE_BROADCAST:
-    case HASH_RXQ_FLOW_TYPE_IPV6MULTI:
-    case HASH_RXQ_FLOW_TYPE_MAC:
-        return 1;
-    default:
-        /* Unsupported flow type is not allowed. */
-        return 0;
-    }
-    return 0;
-}
-
-/**
- * Automatically enable/disable flows according to configuration.
- *
- * @param priv
- *   Private structure.
- *
- * @return
- *   0 on success, errno value on failure.
- */
-int
-priv_rehash_flows(struct priv *priv)
-{
-    size_t i;
-
-    for (i = 0; i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); ++i)
-        if (!priv_allow_flow_type(priv, i)) {
-            priv_special_flow_disable(priv, i);
-        } else {
-            int ret = priv_special_flow_enable(priv, i);
-
-            if (ret)
-                return ret;
-        }
-    if (priv_allow_flow_type(priv, HASH_RXQ_FLOW_TYPE_MAC))
-        return priv_mac_addrs_enable(priv);
-    priv_mac_addrs_disable(priv);
-    return 0;
-}
-
 /**
  * Allocate RX queue elements.
  *
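Both the indirection-table sizing above and the descriptor-count rounding in mlx5_rx_queue_setup() below rely on log2above(). A standalone model written from the observed usage (the real helper lives in the driver's utility header); rounding a non-power-of-two up means 1 << log2above(v):

#include <stdio.h>

/* Ceiling of log2: the shift count, plus one if any low bit was set. */
static unsigned int
log2above(unsigned int v)
{
    unsigned int l;
    unsigned int r;

    for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
        r |= (v & 1);
    return l + r;
}

int main(void)
{
    unsigned int desc = 6;

    /* 6 descriptors round up to 1 << 3 = 8, as in mlx5_rx_queue_setup(). */
    printf("%u -> %u\n", desc, 1u << log2above(desc));
    return 0;
}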
@@ -775,8 +242,6 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
     int ret = 0;
 
     (void)conf;
-    if (mlx5_is_secondary())
-        return -E_RTE_SECONDARY;
     priv_lock(priv);
     if (!rte_is_power_of_2(desc)) {
         desc = 1 << log2above(desc);
@@ -827,9 +292,6 @@ mlx5_rx_queue_release(void *dpdk_rxq)
     struct mlx5_rxq_ctrl *rxq_ctrl;
     struct priv *priv;
 
-    if (mlx5_is_secondary())
-        return;
-
     if (rxq == NULL)
         return;
     rxq_ctrl = container_of(rxq, struct mlx5_rxq_ctrl, rxq);
@@ -860,11 +322,10 @@ priv_rx_intr_vec_enable(struct priv *priv)
     unsigned int count = 0;
     struct rte_intr_handle *intr_handle = priv->dev->intr_handle;
 
-    assert(!mlx5_is_secondary());
     if (!priv->dev->data->dev_conf.intr_conf.rxq)
         return 0;
     priv_rx_intr_vec_disable(priv);
-    intr_handle->intr_vec = malloc(sizeof(intr_handle->intr_vec[rxqs_n]));
+    intr_handle->intr_vec = malloc(n * sizeof(intr_handle->intr_vec[0]));
     if (intr_handle->intr_vec == NULL) {
         ERROR("failed to allocate memory for interrupt vector,"
               " Rx interrupts will not be supported");
@@ -929,6 +390,8 @@ priv_rx_intr_vec_disable(struct priv *priv)
 
     if (!priv->dev->data->dev_conf.intr_conf.rxq)
         return;
+    if (!intr_handle->intr_vec)
+        goto free;
     for (i = 0; i != n; ++i) {
         struct mlx5_rxq_ctrl *rxq_ctrl;
         struct mlx5_rxq_data *rxq_data;
@@ -944,8 +407,10 @@ priv_rx_intr_vec_disable(struct priv *priv)
         rxq_ctrl = container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
         mlx5_priv_rxq_ibv_release(priv, rxq_ctrl->ibv);
     }
+free:
     rte_intr_free_epoll_fd(intr_handle);
-    free(intr_handle->intr_vec);
+    if (intr_handle->intr_vec)
+        free(intr_handle->intr_vec);
     intr_handle->nb_efd = 0;
     intr_handle->intr_vec = NULL;
 }
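The one-line fix in priv_rx_intr_vec_enable() above is worth spelling out: sizeof never evaluates its operand, so sizeof(intr_vec[rxqs_n]) is the size of a single element, not of rxqs_n elements, and the old call under-allocated the vector. A standalone illustration:

#include <stdio.h>

int main(void)
{
    int *vec = NULL; /* never dereferenced: sizeof is unevaluated */
    unsigned int n = 16;

    printf("sizeof(vec[n])     = %zu\n", sizeof(vec[n]));     /* one element  */
    printf("n * sizeof(vec[0]) = %zu\n", n * sizeof(vec[0])); /* n elements   */
    return 0;
}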
@@ -989,7 +454,7 @@ mlx5_arm_cq(struct mlx5_rxq_data *rxq, int sq_n_rxq)
 int
 mlx5_rx_intr_enable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-    struct priv *priv = mlx5_get_priv(dev);
+    struct priv *priv = dev->data->dev_private;
     struct mlx5_rxq_data *rxq_data;
     struct mlx5_rxq_ctrl *rxq_ctrl;
     int ret = 0;
@@ -1033,7 +498,7 @@ exit:
 int
 mlx5_rx_intr_disable(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
-    struct priv *priv = mlx5_get_priv(dev);
+    struct priv *priv = dev->data->dev_private;
     struct mlx5_rxq_data *rxq_data;
     struct mlx5_rxq_ctrl *rxq_ctrl;
     struct mlx5_rxq_ibv *rxq_ibv = NULL;
@@ -1091,7 +556,10 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
         container_of(rxq_data, struct mlx5_rxq_ctrl, rxq);
     struct ibv_wq_attr mod;
     union {
-        struct ibv_cq_init_attr_ex cq;
+        struct {
+            struct ibv_cq_init_attr_ex ibv;
+            struct mlx5dv_cq_init_attr mlx5;
+        } cq;
         struct ibv_wq_init_attr wq;
         struct ibv_cq_ex cq_attr;
     } attr;
@@ -1130,20 +598,29 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
             goto error;
         }
     }
-    attr.cq = (struct ibv_cq_init_attr_ex){
+    attr.cq.ibv = (struct ibv_cq_init_attr_ex){
+        .cqe = cqe_n,
+        .channel = tmpl->channel,
+        .comp_mask = 0,
+    };
+    attr.cq.mlx5 = (struct mlx5dv_cq_init_attr){
         .comp_mask = 0,
     };
-    if (priv->cqe_comp) {
-        attr.cq.comp_mask |= IBV_CQ_INIT_ATTR_MASK_FLAGS;
-        attr.cq.flags |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+    if (priv->cqe_comp && !rxq_data->hw_timestamp) {
+        attr.cq.mlx5.comp_mask |=
+            MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
+        attr.cq.mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
         /*
          * For vectorized Rx, it must not be doubled in order to
          * make cq_ci and rq_ci aligned.
         */
         if (rxq_check_vec_support(rxq_data) < 0)
-            cqe_n *= 2;
+            attr.cq.ibv.cqe *= 2;
+    } else if (priv->cqe_comp && rxq_data->hw_timestamp) {
+        DEBUG("Rx CQE compression is disabled for HW timestamp");
     }
-    tmpl->cq = ibv_create_cq(priv->ctx, cqe_n, NULL, tmpl->channel, 0);
+    tmpl->cq = ibv_cq_ex_to_cq(mlx5dv_create_cq(priv->ctx, &attr.cq.ibv,
+                            &attr.cq.mlx5));
     if (tmpl->cq == NULL) {
         ERROR("%p: CQ creation failure", (void *)rxq_ctrl);
         goto error;
@@ -1247,6 +724,9 @@ mlx5_priv_rxq_ibv_new(struct priv *priv, uint16_t idx)
     };
     rxq_data->cq_db = cq_info.dbrec;
     rxq_data->cqes = (volatile struct mlx5_cqe (*)[])(uintptr_t)cq_info.buf;
+    rxq_data->cq_uar = cq_info.cq_uar;
+    rxq_data->cqn = cq_info.cqn;
+    rxq_data->cq_arm_sn = 0;
     /* Update doorbell counter. */
     rxq_data->rq_ci = (1 << rxq_data->elts_n) >> rxq_data->sges_n;
     rte_wmb();
@@ -1408,6 +888,7 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
                0, socket);
     if (!tmpl)
         return NULL;
+    tmpl->socket = socket;
     if (priv->dev->data->dev_conf.intr_conf.rxq)
         tmpl->irq = 1;
     /* Enable scattered packets support for this queue if necessary. */
@@ -1462,6 +943,8 @@ mlx5_priv_rxq_new(struct priv *priv, uint16_t idx, uint16_t desc,
     if (priv->hw_csum_l2tun)
         tmpl->rxq.csum_l2tun =
             !!dev->data->dev_conf.rxmode.hw_ip_checksum;
+    tmpl->rxq.hw_timestamp =
+        !!dev->data->dev_conf.rxmode.hw_timestamp;
     /* Configure VLAN stripping. */
     tmpl->rxq.vlan_strip = (priv->hw_vlan_strip &&
                 !!dev->data->dev_conf.rxmode.hw_vlan_strip);
@@ -1632,7 +1115,7 @@ mlx5_priv_ind_table_ibv_new(struct priv *priv, uint16_t queues[],
     struct mlx5_ind_table_ibv *ind_tbl;
     const unsigned int wq_n = rte_is_power_of_2(queues_n) ?
         log2above(queues_n) :
-        priv->ind_table_max_size;
+        log2above(priv->ind_table_max_size);
     struct ibv_wq *wq[1 << wq_n];
     unsigned int i;
     unsigned int j;
@@ -1777,7 +1260,8 @@ mlx5_priv_ind_table_ibv_verify(struct priv *priv)
  * @param hash_fields
  *   Verbs protocol hash field to make the RSS on.
  * @param queues
- *   Queues entering in hash queue.
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
 *
@@ -1792,6 +1276,7 @@ mlx5_priv_hrxq_new(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
     struct mlx5_ind_table_ibv *ind_tbl;
     struct ibv_qp *qp;
 
+    queues_n = hash_fields ? queues_n : 1;
     ind_tbl = mlx5_priv_ind_table_ibv_get(priv, queues, queues_n);
     if (!ind_tbl)
         ind_tbl = mlx5_priv_ind_table_ibv_new(priv, queues, queues_n);
@@ -1844,7 +1329,8 @@ error:
  * @param rss_conf
  *   RSS configuration for the Rx hash queue.
  * @param queues
- *   Queues entering in hash queue.
+ *   Queues entering in hash queue. In case of empty hash_fields only the
+ *   first queue index will be taken for the indirection table.
  * @param queues_n
  *   Number of queues.
 *
@@ -1857,6 +1343,7 @@ mlx5_priv_hrxq_get(struct priv *priv, uint8_t *rss_key, uint8_t rss_key_len,
 {
     struct mlx5_hrxq *hrxq;
 
+    queues_n = hash_fields ? queues_n : 1;
     LIST_FOREACH(hrxq, &priv->hrxqs, next) {
         struct mlx5_ind_table_ibv *ind_tbl;
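The CQ-creation hunk above replaces ibv_create_cq() with the direct-verbs pair from rdma-core's mlx5 provider: an ibv_cq_init_attr_ex plus an mlx5dv_cq_init_attr handed to mlx5dv_create_cq(), whose extended CQ is converted back with ibv_cq_ex_to_cq(). A condensed sketch of that call sequence, with error handling and the HW-timestamp special case elided and all parameters placeholders supplied by the caller:

#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>

static struct ibv_cq *
create_rx_cq(struct ibv_context *ctx, struct ibv_comp_channel *channel,
         int cqe_n, int enable_cqe_comp)
{
    struct ibv_cq_init_attr_ex ibv = {
        .cqe = cqe_n,
        .channel = channel,
        .comp_mask = 0,
    };
    struct mlx5dv_cq_init_attr mlx5 = {
        .comp_mask = 0,
    };

    if (enable_cqe_comp) {
        /* Ask the device to compress CQEs; the "hash" result format
         * matches what the diff selects. */
        mlx5.comp_mask |= MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
        mlx5.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;
    }
    /* mlx5dv_create_cq() returns an extended CQ; convert it to the
     * plain ibv_cq handle the rest of the code uses. */
    return ibv_cq_ex_to_cq(mlx5dv_create_cq(ctx, &ibv, &mlx5));
}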