/**
 * Update flows according to pattern and RSS hash fields.
 *
 * Selects, for each verbs flow attribute already expanded into
 * parser->queue[], the hash fields matching the requested RSS types, and
 * releases (rte_free) the expansions that cannot be used.  Relies on the
 * hash_rxq_type enum ordering: L4 types precede their L3 type
 * (TCPV4/UDPV4 < IPV4, TCPV6/UDPV6 < IPV6) — NOTE(review): confirm against
 * the enum definition if it ever changes.
 *
 * @param[in, out] parser
 *   Internal parser structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx5_flow_convert_rss(struct mlx5_flow_parse *parser)
{
	unsigned int i;
	enum hash_rxq_type start;
	enum hash_rxq_type layer;
	/* Outer RSS: pattern has a tunnel but RSS level targets the outer headers. */
	int outer = parser->tunnel && parser->rss_conf.level < 2;
	uint64_t rss = parser->rss_conf.types;

	/* Default to outer RSS. */
	if (!parser->rss_conf.level)
		parser->rss_conf.level = 1;
	/* Pick the innermost parsed layer relevant to the chosen RSS level. */
	layer = outer ? parser->out_layer : parser->layer;
	if (layer == HASH_RXQ_TUNNEL)
		layer = HASH_RXQ_ETH;
	if (outer) {
		/* Only one hash type for outer RSS. */
		if (rss && layer == HASH_RXQ_ETH) {
			/* No L3/L4 in pattern: scan the whole table from the top. */
			start = HASH_RXQ_TCPV4;
		} else if (rss && layer != HASH_RXQ_ETH &&
			   !(rss & hash_rxq_init[layer].dpdk_rss_hf)) {
			/* If RSS not match L4 pattern, try L3 RSS. */
			if (layer < HASH_RXQ_IPV4)
				layer = HASH_RXQ_IPV4;
			else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6)
				layer = HASH_RXQ_IPV6;
			start = layer;
		} else {
			start = layer;
		}
		/* Scan first valid hash type. */
		for (i = start; rss && i <= layer; ++i) {
			if (!parser->queue[i].ibv_attr)
				continue;
			if (hash_rxq_init[i].dpdk_rss_hf & rss)
				break;
		}
		/*
		 * Found a match within [start, layer]: record its hash fields
		 * on the kept (layer) queue.  Otherwise hash_fields stays as-is
		 * and the flow is not hashed.
		 */
		if (rss && i <= layer)
			parser->queue[layer].hash_fields =
				hash_rxq_init[i].hash_fields;
		/* Trim unused hash types. */
		for (i = 0; i != hash_rxq_init_n; ++i) {
			if (parser->queue[i].ibv_attr && i != layer) {
				rte_free(parser->queue[i].ibv_attr);
				parser->queue[i].ibv_attr = NULL;
			}
		}
	} else {
		/* Expand for inner or normal RSS. */
		if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4))
			start = HASH_RXQ_TCPV4;
		else if (rss && layer == HASH_RXQ_IPV6)
			start = HASH_RXQ_TCPV6;
		else
			start = layer;
		/* For L4 pattern, try L3 RSS if no L4 RSS. */
		/* Trim unused hash types. */
		for (i = 0; i != hash_rxq_init_n; ++i) {
			if (!parser->queue[i].ibv_attr)
				continue;
			/* Drop expansions outside the [start, layer] window. */
			if (i < start || i > layer) {
				rte_free(parser->queue[i].ibv_attr);
				parser->queue[i].ibv_attr = NULL;
				continue;
			}
			/* No RSS types requested: keep the attr, leave hash fields untouched. */
			if (!rss)
				continue;
			if (hash_rxq_init[i].dpdk_rss_hf & rss) {
				/* Requested RSS type matches this expansion directly. */
				parser->queue[i].hash_fields =
					hash_rxq_init[i].hash_fields;
			} else if (i != layer) {
				/* Remove unused RSS expansion. */
				rte_free(parser->queue[i].ibv_attr);
				parser->queue[i].ibv_attr = NULL;
			} else if (layer < HASH_RXQ_IPV4 &&
				   (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf &
				    rss)) {
				/* Allow IPv4 RSS on L4 pattern. */
				parser->queue[i].hash_fields =
					hash_rxq_init[HASH_RXQ_IPV4]
					.hash_fields;
			} else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 &&
				   (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf &
				    rss)) {
				/* Allow IPv6 RSS on L4 pattern. */
				parser->queue[i].hash_fields =
					hash_rxq_init[HASH_RXQ_IPV6]
					.hash_fields;
			}
		}
	}
	return 0;
}
+