diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c
index 79c2346a45..ebbe186d96 100644
--- a/drivers/net/mlx5/mlx5_rxq.c
+++ b/drivers/net/mlx5/mlx5_rxq.c
@@ -65,15 +65,16 @@
 #include "mlx5_defs.h"
 
 /* Initialization data for hash RX queues. */
-static const struct hash_rxq_init hash_rxq_init[] = {
+const struct hash_rxq_init hash_rxq_init[] = {
 	[HASH_RXQ_TCPV4] = {
 		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
 				IBV_EXP_RX_HASH_DST_IPV4 |
 				IBV_EXP_RX_HASH_SRC_PORT_TCP |
 				IBV_EXP_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_TCP,
 		.flow_priority = 0,
 		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_TCP,
+			.type = IBV_EXP_FLOW_SPEC_TCP,
 			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
@@ -83,9 +84,10 @@ static const struct hash_rxq_init hash_rxq_init[] = {
 				IBV_EXP_RX_HASH_DST_IPV4 |
 				IBV_EXP_RX_HASH_SRC_PORT_UDP |
 				IBV_EXP_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV4_UDP,
 		.flow_priority = 0,
 		.flow_spec.tcp_udp = {
-			.type = IBV_FLOW_SPEC_UDP,
+			.type = IBV_EXP_FLOW_SPEC_UDP,
 			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_IPV4],
@@ -93,18 +95,61 @@ static const struct hash_rxq_init hash_rxq_init[] = {
 	[HASH_RXQ_IPV4] = {
 		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV4 |
 				IBV_EXP_RX_HASH_DST_IPV4),
+		.dpdk_rss_hf = (ETH_RSS_IPV4 |
+				ETH_RSS_FRAG_IPV4),
 		.flow_priority = 1,
 		.flow_spec.ipv4 = {
-			.type = IBV_FLOW_SPEC_IPV4,
+			.type = IBV_EXP_FLOW_SPEC_IPV4,
 			.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
 	},
+#ifdef HAVE_FLOW_SPEC_IPV6
+	[HASH_RXQ_TCPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_TCP |
+				IBV_EXP_RX_HASH_DST_PORT_TCP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_TCP,
+		.flow_priority = 0,
+		.flow_spec.tcp_udp = {
+			.type = IBV_EXP_FLOW_SPEC_TCP,
+			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+	},
+	[HASH_RXQ_UDPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6 |
+				IBV_EXP_RX_HASH_SRC_PORT_UDP |
+				IBV_EXP_RX_HASH_DST_PORT_UDP),
+		.dpdk_rss_hf = ETH_RSS_NONFRAG_IPV6_UDP,
+		.flow_priority = 0,
+		.flow_spec.tcp_udp = {
+			.type = IBV_EXP_FLOW_SPEC_UDP,
+			.size = sizeof(hash_rxq_init[0].flow_spec.tcp_udp),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_IPV6],
+	},
+	[HASH_RXQ_IPV6] = {
+		.hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 |
+				IBV_EXP_RX_HASH_DST_IPV6),
+		.dpdk_rss_hf = (ETH_RSS_IPV6 |
+				ETH_RSS_FRAG_IPV6),
+		.flow_priority = 1,
+		.flow_spec.ipv6 = {
+			.type = IBV_EXP_FLOW_SPEC_IPV6,
+			.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
+		},
+		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
+	},
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 	[HASH_RXQ_ETH] = {
 		.hash_fields = 0,
+		.dpdk_rss_hf = 0,
 		.flow_priority = 2,
 		.flow_spec.eth = {
-			.type = IBV_FLOW_SPEC_ETH,
+			.type = IBV_EXP_FLOW_SPEC_ETH,
 			.size = sizeof(hash_rxq_init[0].flow_spec.eth),
 		},
 		.underlayer = NULL,
@@ -112,7 +157,7 @@ static const struct hash_rxq_init hash_rxq_init[] = {
 };
 
 /* Number of entries in hash_rxq_init[]. */
-static const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
+const unsigned int hash_rxq_init_n = RTE_DIM(hash_rxq_init);
 
 /* Initialization data for hash RX queue indirection tables. */
 static const struct ind_table_init ind_table_init[] = {
@@ -122,8 +167,17 @@ static const struct ind_table_init ind_table_init[] = {
 			1 << HASH_RXQ_TCPV4 |
 			1 << HASH_RXQ_UDPV4 |
 			1 << HASH_RXQ_IPV4 |
+#ifdef HAVE_FLOW_SPEC_IPV6
+			1 << HASH_RXQ_TCPV6 |
+			1 << HASH_RXQ_UDPV6 |
+			1 << HASH_RXQ_IPV6 |
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 			0,
+#ifdef HAVE_FLOW_SPEC_IPV6
+		.hash_types_n = 6,
+#else /* HAVE_FLOW_SPEC_IPV6 */
 		.hash_types_n = 3,
+#endif /* HAVE_FLOW_SPEC_IPV6 */
 	},
 	{
 		.max_size = 1,
@@ -169,7 +223,7 @@ const size_t rss_hash_default_key_len = sizeof(rss_hash_default_key);
  */
 size_t
 hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
-		   struct ibv_flow_attr *flow_attr,
+		   struct ibv_exp_flow_attr *flow_attr,
 		   size_t flow_attr_size)
 {
 	size_t offset = sizeof(*flow_attr);
@@ -186,8 +240,8 @@ hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
 		return offset;
 	flow_attr_size = offset;
 	init = &hash_rxq_init[type];
-	*flow_attr = (struct ibv_flow_attr){
-		.type = IBV_FLOW_ATTR_NORMAL,
+	*flow_attr = (struct ibv_exp_flow_attr){
+		.type = IBV_EXP_FLOW_ATTR_NORMAL,
 		.priority = init->flow_priority,
 		.num_of_specs = 0,
 		.port = hash_rxq->priv->port,
@@ -205,43 +259,29 @@ hash_rxq_flow_attr(const struct hash_rxq *hash_rxq,
 }
 
 /**
- * Return nearest power of two above input value.
- *
- * @param v
- *   Input value.
- *
- * @return
- *   Nearest power of two above input value.
- */
-static unsigned int
-log2above(unsigned int v)
-{
-	unsigned int l;
-	unsigned int r;
-
-	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
-		r |= (v & 1);
-	return (l + r);
-}
-
-/**
- * Return the type corresponding to the n'th bit set.
+ * Convert hash type position in indirection table initializer to
+ * hash RX queue type.
  *
  * @param table
- *   The indirection table.
- * @param n
- *   The n'th bit set.
+ *   Indirection table initializer.
+ * @param pos
+ *   Hash type position.
  *
  * @return
- *   The corresponding hash_rxq_type.
+ *   Hash RX queue type.
  */
 static enum hash_rxq_type
-hash_rxq_type_from_n(const struct ind_table_init *table, unsigned int n)
+hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos)
 {
-	assert(n < table->hash_types_n);
-	while (((table->hash_types >> n) & 0x1) == 0)
-		++n;
-	return n;
+	enum hash_rxq_type type = 0;
+
+	assert(pos < table->hash_types_n);
+	do {
+		if ((table->hash_types & (1 << type)) && (pos-- == 0))
+			break;
+		++type;
+	} while (1);
+	return type;
 }
 
 /**
@@ -259,16 +299,18 @@ static unsigned int
 priv_make_ind_table_init(struct priv *priv,
 			 struct ind_table_init (*table)[IND_TABLE_INIT_N])
 {
+	uint64_t rss_hf;
 	unsigned int i;
 	unsigned int j;
 	unsigned int table_n = 0;
 	/* Mandatory to receive frames not handled by normal hash RX queues. */
 	unsigned int hash_types_sup = 1 << HASH_RXQ_ETH;
 
+	rss_hf = priv->dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
 	/* Process other protocols only if more than one queue. */
 	if (priv->rxqs_n > 1)
 		for (i = 0; (i != hash_rxq_init_n); ++i)
-			if (hash_rxq_init[i].hash_fields)
+			if (rss_hf & hash_rxq_init[i].dpdk_rss_hf)
 				hash_types_sup |= (1 << i);
 
 	/* Filter out entries whose protocols are not in the set. */
@@ -304,14 +346,7 @@ priv_make_ind_table_init(struct priv *priv,
 int
 priv_create_hash_rxqs(struct priv *priv)
 {
-	/* If the requested number of WQs is not a power of two, use the
-	 * maximum indirection table size for better balancing.
-	 * The result is always rounded to the next power of two. */
-	unsigned int wqs_n =
-		(1 << log2above((priv->rxqs_n & (priv->rxqs_n - 1)) ?
-				priv->ind_table_max_size :
-				priv->rxqs_n));
-	struct ibv_exp_wq *wqs[wqs_n];
+	struct ibv_exp_wq *wqs[priv->reta_idx_n];
 	struct ind_table_init ind_table_init[IND_TABLE_INIT_N];
 	unsigned int ind_tables_n =
 		priv_make_ind_table_init(priv, &ind_table_init);
@@ -329,7 +364,6 @@ priv_create_hash_rxqs(struct priv *priv)
 	assert(priv->hash_rxqs_n == 0);
 	assert(priv->pd != NULL);
 	assert(priv->ctx != NULL);
-	assert(priv->rss_conf != NULL);
 	if (priv->rxqs_n == 0)
 		return EINVAL;
 	assert(priv->rxqs != NULL);
@@ -338,25 +372,15 @@ priv_create_hash_rxqs(struct priv *priv)
 		      " indirection table cannot be created");
 		return EINVAL;
 	}
-	if ((wqs_n < priv->rxqs_n) || (wqs_n > priv->ind_table_max_size)) {
-		ERROR("cannot handle this many RX queues (%u)", priv->rxqs_n);
-		err = ERANGE;
-		goto error;
-	}
-	if (wqs_n != priv->rxqs_n) {
+	if (priv->rxqs_n & (priv->rxqs_n - 1)) {
 		INFO("%u RX queues are configured, consider rounding this"
 		     " number to the next power of two for better balancing",
 		     priv->rxqs_n);
-		DEBUG("indirection table extended to assume %u WQs", wqs_n);
-	}
-	/* When the number of RX queues is not a power of two, the remaining
-	 * table entries are padded with reused WQs and hashes are not spread
-	 * uniformly. */
-	for (i = 0, j = 0; (i != wqs_n); ++i) {
-		wqs[i] = (*priv->rxqs)[j]->wq;
-		if (++j == priv->rxqs_n)
-			j = 0;
+		DEBUG("indirection table extended to assume %u WQs",
+		      priv->reta_idx_n);
 	}
+	for (i = 0; (i != priv->reta_idx_n); ++i)
+		wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq;
 	/* Get number of hash RX queues to configure. */
 	for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i)
 		hash_rxqs_n += ind_table_init[i].hash_types_n;
@@ -381,8 +405,8 @@ priv_create_hash_rxqs(struct priv *priv)
 		unsigned int ind_tbl_size = ind_table_init[i].max_size;
 		struct ibv_exp_rwq_ind_table *ind_table;
 
-		if (wqs_n < ind_tbl_size)
-			ind_tbl_size = wqs_n;
+		if (priv->reta_idx_n < ind_tbl_size)
+			ind_tbl_size = priv->reta_idx_n;
 		ind_init_attr.log_ind_tbl_size = log2above(ind_tbl_size);
 		errno = 0;
 		ind_table = ibv_exp_create_rwq_ind_table(priv->ctx,
@@ -411,11 +435,17 @@ priv_create_hash_rxqs(struct priv *priv)
 	     ++i) {
 		struct hash_rxq *hash_rxq = &(*hash_rxqs)[i];
 		enum hash_rxq_type type =
-			hash_rxq_type_from_n(&ind_table_init[j], k);
+			hash_rxq_type_from_pos(&ind_table_init[j], k);
+		struct rte_eth_rss_conf *priv_rss_conf =
+			(*priv->rss_conf)[type];
 		struct ibv_exp_rx_hash_conf hash_conf = {
 			.rx_hash_function = IBV_EXP_RX_HASH_FUNC_TOEPLITZ,
-			.rx_hash_key_len = priv->rss_conf->rss_key_len,
-			.rx_hash_key = priv->rss_conf->rss_key,
+			.rx_hash_key_len = (priv_rss_conf ?
+					    priv_rss_conf->rss_key_len :
+					    rss_hash_default_key_len),
+			.rx_hash_key = (priv_rss_conf ?
+					priv_rss_conf->rss_key :
+					rss_hash_default_key),
 			.rx_hash_fields_mask = hash_rxq_init[type].hash_fields,
 			.rwq_ind_tbl = (*ind_tables)[j],
 		};
@@ -429,8 +459,8 @@ priv_create_hash_rxqs(struct priv *priv)
 			.port_num = priv->port,
 		};
 
-		DEBUG("using indirection table %u for hash RX queue %u",
-		      j, i);
+		DEBUG("using indirection table %u for hash RX queue %u type %d",
+		      j, i, type);
 		*hash_rxq = (struct hash_rxq){
 			.priv = priv,
 			.qp = ibv_exp_create_qp(priv->ctx, &qp_init_attr),
@@ -526,6 +556,35 @@ priv_destroy_hash_rxqs(struct priv *priv)
 	priv->ind_tables = NULL;
 }
 
+/**
+ * Check whether a given flow type is allowed.
+ *
+ * @param priv
+ *   Pointer to private structure.
+ * @param type
+ *   Flow type to check.
+ *
+ * @return
+ *   Nonzero if the given flow type is allowed.
+ */
+int
+priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type)
+{
+	/* Only FLOW_TYPE_PROMISC is allowed when promiscuous mode
+	 * has been requested. */
+	if (priv->promisc_req)
+		return type == HASH_RXQ_FLOW_TYPE_PROMISC;
+	switch (type) {
+	case HASH_RXQ_FLOW_TYPE_PROMISC:
+		return !!priv->promisc_req;
+	case HASH_RXQ_FLOW_TYPE_ALLMULTI:
+		return !!priv->allmulti_req;
+	case HASH_RXQ_FLOW_TYPE_MAC:
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * Allocate RX queue elements with scattered packets support.
  *
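
Note on two of the behavioral changes above: hash RX queue types are now enabled from the mask requested through dev_conf.rx_adv_conf.rss_conf.rss_hf via the new dpdk_rss_hf field (instead of unconditionally whenever hash_fields is nonzero), and hash_rxq_type_from_pos() returns the type at a given position among the *enabled* types, whereas the removed hash_rxq_type_from_n() merely skipped to the next set bit at or after n. The following standalone sketch is not part of the patch; the type names and flag values are simplified stand-ins for the real ETH_RSS_* macros and hash_rxq_type enum, used only to illustrate both mechanisms:

#include <stdint.h>
#include <stdio.h>

enum hash_rxq_type { TCPV4, UDPV4, IPV4, ETH, TYPES_N };

/* Stand-ins for the relevant ETH_RSS_* bits (values are arbitrary). */
#define RSS_NONFRAG_IPV4_TCP (UINT64_C(1) << 0)
#define RSS_NONFRAG_IPV4_UDP (UINT64_C(1) << 1)
#define RSS_IPV4             (UINT64_C(1) << 2)

/* Per-type DPDK RSS flags, mimicking the new dpdk_rss_hf field. */
static const uint64_t dpdk_rss_hf[TYPES_N] = {
	[TCPV4] = RSS_NONFRAG_IPV4_TCP,
	[UDPV4] = RSS_NONFRAG_IPV4_UDP,
	[IPV4] = RSS_IPV4,
	[ETH] = 0, /* mandatory type, never selected through rss_hf */
};

/*
 * Return the type at position pos among the types enabled in
 * hash_types; same walk as hash_rxq_type_from_pos(). The caller must
 * keep pos below the number of enabled types, as the driver's
 * assert() guarantees.
 */
static enum hash_rxq_type
type_from_pos(unsigned int hash_types, unsigned int pos)
{
	enum hash_rxq_type type = 0;

	do {
		if ((hash_types & (1 << type)) && (pos-- == 0))
			break;
		++type;
	} while (1);
	return type;
}

int
main(void)
{
	/* Application requests TCP/IPv4 and plain IPv4 RSS only. */
	uint64_t rss_hf = RSS_NONFRAG_IPV4_TCP | RSS_IPV4;
	unsigned int hash_types = 1 << ETH; /* ETH is always enabled. */
	unsigned int i;

	for (i = 0; (i != TYPES_N); ++i)
		if (rss_hf & dpdk_rss_hf[i])
			hash_types |= 1 << i;
	/* Enabled set is {TCPV4, IPV4, ETH}; position 1 is IPV4 (2). */
	printf("type at pos 1: %d\n", type_from_pos(hash_types, 1));
	return 0;
}

With UDPV4 left out of rss_hf, position 1 maps to IPV4 rather than UDPV4: positions count only the enabled types, which is why the helper walks the bit-mask instead of scanning from bit n as the removed hash_rxq_type_from_n() did.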