X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_rxq.c;h=37b4efd7bc32019a9a0b3a90dbfd28c91ad20e76;hb=7ee177a72fd0e4cd752efceb896a850875a7af1f;hp=d46fc132a70e3eff0a6cceda4262cc8c84769fff;hpb=0573873d5b7dda5ffb8b1e6733497df115a29073;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index d46fc132a7..37b4efd7bc 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -74,7 +74,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
 		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
 		.flow_priority = 0,
 		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_TCP,
+			.type = IBV_EXP_FLOW_SPEC_TCP,
 			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
@@ -87,7 +87,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
 		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
 		.flow_priority = 0,
 		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_UDP,
+			.type = IBV_EXP_FLOW_SPEC_UDP,
 			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
@@ -99,17 +99,57 @@ const struct hash_rxq_init hash_rxq_init[] = {
 				ETH_RSS_FRAG_IPV4),
 		.flow_priority = 1,
 		.flow_spec.ipv4 = {
-			.type = IBV_FLOW_SPEC_IPV4,
+			.type = IBV_EXP_FLOW_SPEC_IPV4,
 			.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
 	},
+#ifdef HAVE_FLOW_SPEC_IPV6
+	[HASH_RXQ_TCPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_TCP |
+				IBV_EXP_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+		.flow_priority = 0,
+		.flow_spec.tcp_udp = {
+			.type = IBV_EXP_FLOW_SPEC_TCP,
+			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+	},
+	[HASH_RXQ_UDPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_UDP |
+				IBV_EXP_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+		.flow_priority = 0,
+		.flow_spec.tcp_udp = {
+			.type = IBV_EXP_FLOW_SPEC_UDP,
+			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+	},
+	[HASH_RXQ_IPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6),
+		.dpdk_rss_hf = (ETH_RSS_IPV6 |
+				ETH_RSS_FRAG_IPV6),
+		.flow_priority = 1,
+		.flow_spec.ipv6 = {
+			.type = IBV_EXP_FLOW_SPEC_IPV6,
+			.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
+	},
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 	[HASH_RXQ_ETH] = {
 		.hash_fields = 0,
 		.dpdk_rss_hf = 0,
 		.flow_priority = 2,
 		.flow_spec.eth = {
-			.type = IBV_FLOW_SPEC_ETH,
+			.type = IBV_EXP_FLOW_SPEC_ETH,
 			.size = sizeof(hash_rxq_init[0].flow_spec.eth),
 		},
 		.underlayer = NULL,
@@ -127,8 +167,17 @@ static const struct ind_table_init ind_table_init[] = {
 			1 << HASH_RXQ_TCPV4 |
 			1 << HASH_RXQ_UDPV4 |
 			1 << HASH_RXQ_IPV4 |
+#ifdef HAVE_FLOW_SPEC_IPV6
+			1 << HASH_RXQ_TCPV6 |
+			1 << HASH_RXQ_UDPV6 |
+			1 << HASH_RXQ_IPV6 |
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 			0,
+#ifdef HAVE_FLOW_SPEC_IPV6
+		.hash_types_n = 6,
+#else /* HAVE_FLOW_SPEC_IPV6 */
 		.hash_types_n = 3,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 	},
 	{
 		.max_size = 1,
@@ -174,7 +223,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
  */
 size_t
 hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
-		   struct ibv_flow_attr *flow_attr,
+		   struct ibv_exp_flow_attr *flow_attr,
 		   size_t flow_attr_size)
 {
 	size_t offset = sizeof(*flow_attr);
@@ -191,8 +240,8 @@ hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
 		return offset;
 	flow_attr_size = offset;
 	init = &hash_rxq_init[type];
-	*flow_attr = (struct ibv_flow_attr){
-		.type = IBV_FLOW_ATTR_NORMAL,
+	*flow_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
 		.priority = init->flow_priority,
 		.num_of_specs = 0,
 		.port = hash_rxq->priv->port,
@@ -210,43 +259,29 @@ hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
 }
 
 /**
- * Return nearest power of two above input value.
- *
- * @param v
- *   Input value.
- *
- * @return
- *   Nearest power of two above input value.
- */
-static unsigned int
-log2above(unsigned int v)
-{
-	unsigned int l;
-	unsigned int r;
-
-	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
-		r |= (v & 1);
-	return (l + r);
-}
-
-/**
- * Return the type corresponding to the n'th bit set.
+ * Convert hash type position in indirection table initializer to
+ * hash RX queue type.
  *
  * @param table
- *   The indirection table.
- * @param n
- *   The n'th bit set.
+ *   Indirection table initializer.
+ * @param pos
+ *   Hash type position.
  *
  * @return
- *   The corresponding hash_rxq_type.
+ *   Hash RX queue type.
  */
 static enum hash_rxq_type
-hash_rxq_type_from_n(const struct ind_table_init *table, unsigned int n)
+hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
 {
-	assert(n < table->hash_types_n);
-	while (((table->hash_types >> n) & 0x1) == 0)
-		++n;
-	return n;
+	enum hash_rxq_type type = 0;
+
+	assert(pos < table->hash_types_n);
+	do {
+		if ((table->hash_types & (1 << type)) && (pos-- == 0))
+			break;
+		++type;
+	} while (1);
+	return type;
 }
 
 /**
@@ -311,14 +346,7 @@ priv_make_ind_table_init(struct priv *priv,
 int
 priv_create_hash_rxqs(struct priv *priv)
 {
-	/* If the requested number of WQs is not a power of two, use the
-	 * maximum indirection table size for better balancing.
-	 * The result is always rounded to the next power of two. */
-	unsigned int wqs_n =
-		(1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ?
-				priv->ind_table_max_size :
-				priv->rxqs_n));
-	struct ibv_exp_wq *wqs[wqs_n];
+	struct ibv_exp_wq *wqs[priv->reta_idx_n];
 	struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
 	unsigned int ind_tables_n =
 		priv_make_ind_table_init(priv, &ind_table_init);
@@ -344,25 +372,15 @@ priv_create_hash_rxqs(struct priv *priv)
 		      " indirection table cannot be created");
 		return EINVAL;
 	}
-	if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) {
-		ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
-		err = ERANGE;
-		goto error;
-	}
-	if (wqs_n != priv->rxqs_n) {
+	if (priv->rxqs_n & (priv->rxqs_n - 1)) {
 		INFO("%u RX queues are configured, consider rounding this"
 		     " number to the next power of two for better balancing",
 		     priv->rxqs_n);
-		DEBUG("indirection table extended to assume %u WQs", wqs_n);
-	}
-	/* When the number of RX queues is not a power of two, the remaining
-	 * table entries are padded with reused WQs and hashes are not spread
-	 * uniformly. */
-	for (i = 0, j = 0; (i != wqs_n); ++i) {
-		wqs[i] = (*priv->rxqs)[j]->wq;
-		if (++j == priv->rxqs_n)
-			j = 0;
+		DEBUG("indirection table extended to assume %u WQs",
+		      priv->reta_idx_n);
 	}
+	for (i = 0; (i != priv->reta_idx_n); ++i)
+		wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
 	/* Get number of hash RX queues to configure. */
 	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
 		hash_rxqs_n += ind_table_init[i].hash_types_n;
@@ -387,8 +405,8 @@ priv_create_hash_rxqs(struct priv *priv)
 		unsigned int ind_tbl_size = ind_table_init[i].max_size;
 		struct ibv_exp_rwq_ind_table *ind_table;
 
-		if (wqs_n < ind_tbl_size)
-			ind_tbl_size = wqs_n;
+		if (priv->reta_idx_n < ind_tbl_size)
+			ind_tbl_size = priv->reta_idx_n;
 		ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
 		errno = 0;
 		ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
@@ -417,7 +435,7 @@ priv_create_hash_rxqs(struct priv *priv)
 	     ++i) {
 		struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
 		enum hash_rxq_type type =
-			hash_rxq_type_from_n(&ind_table_init[j], k);
+			hash_rxq_type_from_pos(&ind_table_init[j], k);
 		struct rte_eth_rss_conf *priv_rss_conf =
 			(*priv->rss_conf)[type];
 		struct ibv_exp_rx_hash_conf hash_conf = {
@@ -441,8 +459,8 @@ priv_create_hash_rxqs(struct priv *priv)
 			.port_num = priv->port,
 		};
 
-		DEBUG("using indirection table %u for hash RX queue %u",
-		      j, i);
+		DEBUG("using indirection table %u for hash RX queue %u type %d",
+		      j, i, type);
 		*hash_rxq = (struct hash_rxq){
 			.priv = priv,
 			.qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
@@ -538,6 +556,35 @@ priv_destroy_hash_rxqs(struct priv *priv)
 	priv->ind_tables = NULL;
 }
 
+/**
+ * Check whether a given flow type is allowed.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param type
+ *   Flow type to check.
+ *
+ * @return
+ *   Nonzero if the given flow type is allowed.
+ */
+int
+priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
+{
+	/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
+	 * has been requested. */
+	if (priv->promisc_req)
+		return (type == HASH_RXQ_FLOW_TYPE_PROMISC);
+	switch (type) {
+	case HASH_RXQ_FLOW_TYPE_PROMISC:
+		return !!priv->promisc_req;
+	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
+		return !!priv->allmulti_req;
+	case HASH_RXQ_FLOW_TYPE_MAC:
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * Allocate RX queue elements with scattered packets support.
  *
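
Note on the hash_rxq_type_from_pos() change above: the old hash_rxq_type_from_n() scanned upward from bit index n for the next set bit, which only yields the n-th set bit when the enabled types occupy contiguous low bits; the replacement counts set bits in table->hash_types so the mapping stays correct for any mask. The following standalone sketch shows that position-to-set-bit logic in isolation; the struct here is a simplified stand-in for illustration, not the driver's actual definition.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-in for the driver's indirection table initializer
 * (assumption for this sketch, not the DPDK definition). */
struct ind_table_init {
	unsigned int hash_types;   /* bitmask of enabled hash RX queue types */
	unsigned int hash_types_n; /* number of bits set in hash_types */
};

/* Return the bit index of the pos-th set bit in table->hash_types,
 * mirroring the logic introduced by this diff. */
static unsigned int
hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
{
	unsigned int type = 0;

	assert(pos < table->hash_types_n);
	do {
		if ((table->hash_types & (1u << type)) && (pos-- == 0))
			break;
		++type;
	} while (1);
	return type;
}

int
main(void)
{
	/* Sparse mask with bits 1, 3 and 4 set: positions 0, 1 and 2
	 * must map to types 1, 3 and 4 respectively. */
	const struct ind_table_init table = {
		.hash_types = (1u << 1) | (1u << 3) | (1u << 4),
		.hash_types_n = 3,
	};
	unsigned int pos;

	for (pos = 0; pos != table.hash_types_n; ++pos)
		printf("position %u -> type %u\n",
		       pos, hash_rxq_type_from_pos(&table, pos));
	return 0;
}

With the sparse mask above the old scan-from-n approach would have returned 1 for position 1 (bit 1 is set at index >= 1), while the new version correctly returns 3, which matters once the IPv6 entries are conditionally compiled into the type mask.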