X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow.c;h=78e35632e48b8813ab7ee006b964ff1aacbe5c24;hb=80f2d0ed7ff95fe6feedaa27740decd948d78235;hp=f1811c5c93066ead2db71d55d5b17f2279ad8a34;hpb=78a54648ffe3d3174af0daf6f276abec22832bde;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index f1811c5c93..78e35632e4 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -227,6 +227,7 @@ struct rte_flow { struct rte_flow_action_rss rss_conf; /**< RSS configuration */ uint16_t (*queues)[]; /**< Queues indexes to use. */ uint8_t rss_key[40]; /**< copy of the RSS key. */ + uint32_t tunnel; /**< Tunnel type of RTE_PTYPE_TUNNEL_XXX. */ struct ibv_counter_set *cs; /**< Holds the counters for the rule. */ struct mlx5_flow_counter_stats counter_stats;/**> 12) const uint32_t ptype_ext[] = { @@ -458,6 +464,7 @@ struct mlx5_flow_parse { /**< Pointer to Verbs attributes. */ unsigned int offset; /**< Current position or total size of the attribute. */ + uint64_t hash_fields; /**< Verbs hash fields. */ } queue[RTE_DIM(hash_rxq_init)]; }; @@ -701,7 +708,8 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev, " function is Toeplitz"); return -rte_errno; } - if (rss->level) { +#ifndef HAVE_IBV_DEVICE_TUNNEL_SUPPORT + if (parser->rss_conf.level > 1) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -709,6 +717,15 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev, " level is not supported"); return -rte_errno; } +#endif + if (parser->rss_conf.level > 2) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "RSS encapsulation level" + " > 1 is not supported"); + return -rte_errno; + } if (rss->types & MLX5_RSS_HF_MASK) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, @@ -759,7 +776,7 @@ mlx5_flow_convert_actions(struct rte_eth_dev *dev, } parser->rss_conf = (struct rte_flow_action_rss){ .func = RTE_ETH_HASH_FUNCTION_DEFAULT, - .level = 0, + .level = rss->level, .types = rss->types, .key_len = rss_key_len, .queue_num = rss->queue_num, @@ -843,10 +860,12 @@ exit_action_overlap: * 0 on success, a negative errno value otherwise and rte_errno is set. 
*/ static int -mlx5_flow_convert_items_validate(const struct rte_flow_item items[], +mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, + const struct rte_flow_item items[], struct rte_flow_error *error, struct mlx5_flow_parse *parser) { + struct priv *priv = dev->data->dev_private; const struct mlx5_flow_items *cur_item = mlx5_flow_items; unsigned int i; int ret = 0; @@ -880,7 +899,7 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], if (ret) goto exit_item_not_supported; if (IS_TUNNEL(items->type)) { - if (parser->inner) { + if (parser->tunnel) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, items, @@ -888,7 +907,16 @@ mlx5_flow_convert_items_validate(const struct rte_flow_item items[], " tunnel encapsulations."); return -rte_errno; } + if (!priv->config.tunnel_en && + parser->rss_conf.level > 1) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "RSS on tunnel is not supported"); + return -rte_errno; + } parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = flow_ptype[items->type]; } if (parser->drop) { parser->queue[HASH_RXQ_ETH].offset += cur_item->dst_sz; @@ -1005,59 +1033,12 @@ mlx5_flow_update_priority(struct rte_eth_dev *dev, static void mlx5_flow_convert_finalise(struct mlx5_flow_parse *parser) { - const unsigned int ipv4 = - hash_rxq_init[parser->layer].ip_version == MLX5_IPV4; - const enum hash_rxq_type hmin = ipv4 ? HASH_RXQ_TCPV4 : HASH_RXQ_TCPV6; - const enum hash_rxq_type hmax = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; - const enum hash_rxq_type ohmin = ipv4 ? HASH_RXQ_TCPV6 : HASH_RXQ_TCPV4; - const enum hash_rxq_type ohmax = ipv4 ? HASH_RXQ_IPV6 : HASH_RXQ_IPV4; - const enum hash_rxq_type ip = ipv4 ? HASH_RXQ_IPV4 : HASH_RXQ_IPV6; unsigned int i; + uint32_t inner = parser->inner; - /* Remove any other flow not matching the pattern. */ - if (parser->rss_conf.queue_num == 1 && !parser->rss_conf.types) { - for (i = 0; i != hash_rxq_init_n; ++i) { - if (i == HASH_RXQ_ETH) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } + /* Don't create extra flows for outer RSS. */ + if (parser->tunnel && parser->rss_conf.level < 2) return; - } - if (parser->layer == HASH_RXQ_ETH) { - goto fill; - } else { - /* - * This layer becomes useless as the pattern define under - * layers. - */ - rte_free(parser->queue[HASH_RXQ_ETH].ibv_attr); - parser->queue[HASH_RXQ_ETH].ibv_attr = NULL; - } - /* Remove opposite kind of layer e.g. IPv6 if the pattern is IPv4. */ - for (i = ohmin; i != (ohmax + 1); ++i) { - if (!parser->queue[i].ibv_attr) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - /* Remove impossible flow according to the RSS configuration. */ - if (hash_rxq_init[parser->layer].dpdk_rss_hf & - parser->rss_conf.types) { - /* Remove any other flow. */ - for (i = hmin; i != (hmax + 1); ++i) { - if ((i == parser->layer) || - (!parser->queue[i].ibv_attr)) - continue; - rte_free(parser->queue[i].ibv_attr); - parser->queue[i].ibv_attr = NULL; - } - } else if (!parser->queue[ip].ibv_attr) { - /* no RSS possible with the current configuration. 
*/ - parser->rss_conf.queue_num = 1; - return; - } -fill: /* * Fill missing layers in verbs specifications, or compute the correct * offset to allocate the memory space for the attributes and @@ -1068,23 +1049,25 @@ fill: struct ibv_flow_spec_ipv4_ext ipv4; struct ibv_flow_spec_ipv6 ipv6; struct ibv_flow_spec_tcp_udp udp_tcp; + struct ibv_flow_spec_eth eth; } specs; void *dst; uint16_t size; if (i == parser->layer) continue; - if (parser->layer == HASH_RXQ_ETH) { + if (parser->layer == HASH_RXQ_ETH || + parser->layer == HASH_RXQ_TUNNEL) { if (hash_rxq_init[i].ip_version == MLX5_IPV4) { size = sizeof(struct ibv_flow_spec_ipv4_ext); specs.ipv4 = (struct ibv_flow_spec_ipv4_ext){ - .type = IBV_FLOW_SPEC_IPV4_EXT, + .type = inner | IBV_FLOW_SPEC_IPV4_EXT, .size = size, }; } else { size = sizeof(struct ibv_flow_spec_ipv6); specs.ipv6 = (struct ibv_flow_spec_ipv6){ - .type = IBV_FLOW_SPEC_IPV6, + .type = inner | IBV_FLOW_SPEC_IPV6, .size = size, }; } @@ -1101,7 +1084,7 @@ fill: (i == HASH_RXQ_UDPV6) || (i == HASH_RXQ_TCPV6)) { size = sizeof(struct ibv_flow_spec_tcp_udp); specs.udp_tcp = (struct ibv_flow_spec_tcp_udp) { - .type = ((i == HASH_RXQ_UDPV4 || + .type = inner | ((i == HASH_RXQ_UDPV4 || i == HASH_RXQ_UDPV6) ? IBV_FLOW_SPEC_UDP : IBV_FLOW_SPEC_TCP), @@ -1119,6 +1102,109 @@ fill: } } +/** + * Update flows according to pattern and RSS hash fields. + * + * @param[in, out] parser + * Internal parser structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. + */ +static int +mlx5_flow_convert_rss(struct mlx5_flow_parse *parser) +{ + unsigned int i; + enum hash_rxq_type start; + enum hash_rxq_type layer; + int outer = parser->tunnel && parser->rss_conf.level < 2; + uint64_t rss = parser->rss_conf.types; + + /* Default to outer RSS. */ + if (!parser->rss_conf.level) + parser->rss_conf.level = 1; + layer = outer ? parser->out_layer : parser->layer; + if (layer == HASH_RXQ_TUNNEL) + layer = HASH_RXQ_ETH; + if (outer) { + /* Only one hash type for outer RSS. */ + if (rss && layer == HASH_RXQ_ETH) { + start = HASH_RXQ_TCPV4; + } else if (rss && layer != HASH_RXQ_ETH && + !(rss & hash_rxq_init[layer].dpdk_rss_hf)) { + /* If RSS not match L4 pattern, try L3 RSS. */ + if (layer < HASH_RXQ_IPV4) + layer = HASH_RXQ_IPV4; + else if (layer > HASH_RXQ_IPV4 && layer < HASH_RXQ_IPV6) + layer = HASH_RXQ_IPV6; + start = layer; + } else { + start = layer; + } + /* Scan first valid hash type. */ + for (i = start; rss && i <= layer; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + if (hash_rxq_init[i].dpdk_rss_hf & rss) + break; + } + if (rss && i <= layer) + parser->queue[layer].hash_fields = + hash_rxq_init[i].hash_fields; + /* Trim unused hash types. */ + for (i = 0; i != hash_rxq_init_n; ++i) { + if (parser->queue[i].ibv_attr && i != layer) { + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + } + } + } else { + /* Expand for inner or normal RSS. */ + if (rss && (layer == HASH_RXQ_ETH || layer == HASH_RXQ_IPV4)) + start = HASH_RXQ_TCPV4; + else if (rss && layer == HASH_RXQ_IPV6) + start = HASH_RXQ_TCPV6; + else + start = layer; + /* For L4 pattern, try L3 RSS if no L4 RSS. */ + /* Trim unused hash types. 
*/ + for (i = 0; i != hash_rxq_init_n; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + if (i < start || i > layer) { + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + continue; + } + if (!rss) + continue; + if (hash_rxq_init[i].dpdk_rss_hf & rss) { + parser->queue[i].hash_fields = + hash_rxq_init[i].hash_fields; + } else if (i != layer) { + /* Remove unused RSS expansion. */ + rte_free(parser->queue[i].ibv_attr); + parser->queue[i].ibv_attr = NULL; + } else if (layer < HASH_RXQ_IPV4 && + (hash_rxq_init[HASH_RXQ_IPV4].dpdk_rss_hf & + rss)) { + /* Allow IPv4 RSS on L4 pattern. */ + parser->queue[i].hash_fields = + hash_rxq_init[HASH_RXQ_IPV4] + .hash_fields; + } else if (i > HASH_RXQ_IPV4 && i < HASH_RXQ_IPV6 && + (hash_rxq_init[HASH_RXQ_IPV6].dpdk_rss_hf & + rss)) { + /* Allow IPv4 RSS on L4 pattern. */ + parser->queue[i].hash_fields = + hash_rxq_init[HASH_RXQ_IPV6] + .hash_fields; + } + } + } + return 0; +} + /** * Validate and convert a flow supported by the NIC. * @@ -1162,7 +1248,7 @@ mlx5_flow_convert(struct rte_eth_dev *dev, ret = mlx5_flow_convert_actions(dev, actions, error, parser); if (ret) return ret; - ret = mlx5_flow_convert_items_validate(items, error, parser); + ret = mlx5_flow_convert_items_validate(dev, items, error, parser); if (ret) return ret; mlx5_flow_convert_finalise(parser); @@ -1183,10 +1269,6 @@ mlx5_flow_convert(struct rte_eth_dev *dev, for (i = 0; i != hash_rxq_init_n; ++i) { unsigned int offset; - if (!(parser->rss_conf.types & - hash_rxq_init[i].dpdk_rss_hf) && - (i != HASH_RXQ_ETH)) - continue; offset = parser->queue[i].offset; parser->queue[i].ibv_attr = mlx5_flow_convert_allocate(offset, error); @@ -1197,6 +1279,8 @@ mlx5_flow_convert(struct rte_eth_dev *dev, } /* Third step. Conversion parse, fill the specifications. */ parser->inner = 0; + parser->tunnel = 0; + parser->layer = HASH_RXQ_ETH; for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) { struct mlx5_flow_data data = { .dev = dev, @@ -1215,6 +1299,15 @@ mlx5_flow_convert(struct rte_eth_dev *dev, if (ret) goto exit_free; } + if (!parser->drop) { + /* RSS check, remove unused hash types. */ + ret = mlx5_flow_convert_rss(parser); + if (ret) + goto exit_free; + /* Complete missing specification. */ + mlx5_flow_convert_finalise(parser); + } + mlx5_flow_update_priority(dev, parser, attr); if (parser->mark) mlx5_flow_create_flag_mark(parser, parser->mark_id); if (parser->count && parser->create) { @@ -1222,13 +1315,6 @@ mlx5_flow_convert(struct rte_eth_dev *dev, if (!parser->cs) goto exit_count_error; } - /* - * Last step. Complete missing specification to reach the RSS - * configuration. - */ - if (!parser->drop) - mlx5_flow_convert_finalise(parser); - mlx5_flow_update_priority(dev, parser, attr); exit_free: /* Only verification is expected, all resources should be released. */ if (!parser->create) { @@ -1276,17 +1362,11 @@ mlx5_flow_create_copy(struct mlx5_flow_parse *parser, void *src, for (i = 0; i != hash_rxq_init_n; ++i) { if (!parser->queue[i].ibv_attr) continue; - /* Specification must be the same l3 type or none. 
*/ - if (parser->layer == HASH_RXQ_ETH || - (hash_rxq_init[parser->layer].ip_version == - hash_rxq_init[i].ip_version) || - (hash_rxq_init[i].ip_version == 0)) { - dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + - parser->queue[i].offset); - memcpy(dst, src, size); - ++parser->queue[i].ibv_attr->num_of_specs; - parser->queue[i].offset += size; - } + dst = (void *)((uintptr_t)parser->queue[i].ibv_attr + + parser->queue[i].offset); + memcpy(dst, src, size); + ++parser->queue[i].ibv_attr->num_of_specs; + parser->queue[i].offset += size; } } @@ -1317,9 +1397,7 @@ mlx5_flow_create_eth(const struct rte_flow_item *item, .size = eth_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_ETH; + parser->layer = HASH_RXQ_ETH; if (spec) { unsigned int i; @@ -1440,9 +1518,7 @@ mlx5_flow_create_ipv4(const struct rte_flow_item *item, "L3 VXLAN not enabled by device" " parameter and/or not configured" " in firmware"); - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV4; + parser->layer = HASH_RXQ_IPV4; if (spec) { if (!mask) mask = default_mask; @@ -1505,9 +1581,7 @@ mlx5_flow_create_ipv6(const struct rte_flow_item *item, "L3 VXLAN not enabled by device" " parameter and/or not configured" " in firmware"); - /* Don't update layer for the inner pattern. */ - if (!parser->inner) - parser->layer = HASH_RXQ_IPV6; + parser->layer = HASH_RXQ_IPV6; if (spec) { unsigned int i; uint32_t vtc_flow_val; @@ -1580,13 +1654,10 @@ mlx5_flow_create_udp(const struct rte_flow_item *item, .size = udp_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_UDPV4; - else - parser->layer = HASH_RXQ_UDPV6; - } + if (parser->layer == HASH_RXQ_IPV4) + parser->layer = HASH_RXQ_UDPV4; + else + parser->layer = HASH_RXQ_UDPV6; if (spec) { if (!mask) mask = default_mask; @@ -1629,13 +1700,10 @@ mlx5_flow_create_tcp(const struct rte_flow_item *item, .size = tcp_size, }; - /* Don't update layer for the inner pattern. */ - if (!parser->inner) { - if (parser->layer == HASH_RXQ_IPV4) - parser->layer = HASH_RXQ_TCPV4; - else - parser->layer = HASH_RXQ_TCPV6; - } + if (parser->layer == HASH_RXQ_IPV4) + parser->layer = HASH_RXQ_TCPV4; + else + parser->layer = HASH_RXQ_TCPV6; if (spec) { if (!mask) mask = default_mask; @@ -1684,6 +1752,12 @@ mlx5_flow_create_vxlan(const struct rte_flow_item *item, id.vni[0] = 0; parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN)]; + parser->out_layer = parser->layer; + parser->layer = HASH_RXQ_TUNNEL; + /* Default VXLAN to outer RSS. */ + if (!parser->rss_conf.level) + parser->rss_conf.level = 1; if (spec) { if (!mask) mask = default_mask; @@ -1740,6 +1814,12 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused, unsigned int i; parser->inner = IBV_FLOW_SPEC_INNER; + parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)]; + parser->out_layer = parser->layer; + parser->layer = HASH_RXQ_TUNNEL; + /* Default GRE to inner RSS. */ + if (!parser->rss_conf.level) + parser->rss_conf.level = 2; /* Update encapsulation IP layer protocol. 
*/ for (i = 0; i != hash_rxq_init_n; ++i) { if (!parser->queue[i].ibv_attr) @@ -1931,31 +2011,33 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, unsigned int i; for (i = 0; i != hash_rxq_init_n; ++i) { - uint64_t hash_fields; - if (!parser->queue[i].ibv_attr) continue; flow->frxq[i].ibv_attr = parser->queue[i].ibv_attr; parser->queue[i].ibv_attr = NULL; - hash_fields = hash_rxq_init[i].hash_fields; + flow->frxq[i].hash_fields = parser->queue[i].hash_fields; if (!priv->dev->data->dev_started) continue; flow->frxq[i].hrxq = mlx5_hrxq_get(dev, parser->rss_conf.key, parser->rss_conf.key_len, - hash_fields, + flow->frxq[i].hash_fields, parser->rss_conf.queue, - parser->rss_conf.queue_num); + parser->rss_conf.queue_num, + parser->tunnel, + parser->rss_conf.level); if (flow->frxq[i].hrxq) continue; flow->frxq[i].hrxq = mlx5_hrxq_new(dev, parser->rss_conf.key, parser->rss_conf.key_len, - hash_fields, + flow->frxq[i].hash_fields, parser->rss_conf.queue, - parser->rss_conf.queue_num); + parser->rss_conf.queue_num, + parser->tunnel, + parser->rss_conf.level); if (!flow->frxq[i].hrxq) { return rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -1966,6 +2048,99 @@ mlx5_flow_create_action_queue_rss(struct rte_eth_dev *dev, return 0; } +/** + * RXQ update after flow rule creation. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to the flow rule. + */ +static void +mlx5_flow_create_update_rxqs(struct rte_eth_dev *dev, struct rte_flow *flow) +{ + struct priv *priv = dev->data->dev_private; + unsigned int i; + unsigned int j; + + if (!dev->data->dev_started) + return; + for (i = 0; i != flow->rss_conf.queue_num; ++i) { + struct mlx5_rxq_data *rxq_data = (*priv->rxqs) + [(*flow->queues)[i]]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint8_t tunnel = PTYPE_IDX(flow->tunnel); + + rxq_data->mark |= flow->mark; + if (!tunnel) + continue; + rxq_ctrl->tunnel_types[tunnel] += 1; + /* Clear tunnel type if more than one tunnel types set. */ + for (j = 0; j != RTE_DIM(rxq_ctrl->tunnel_types); ++j) { + if (j == tunnel) + continue; + if (rxq_ctrl->tunnel_types[j] > 0) { + rxq_data->tunnel = 0; + break; + } + } + if (j == RTE_DIM(rxq_ctrl->tunnel_types)) + rxq_data->tunnel = flow->tunnel; + } +} + +/** + * Dump flow hash RX queue detail. + * + * @param dev + * Pointer to Ethernet device. + * @param flow + * Pointer to the rte_flow. + * @param hrxq_idx + * Hash RX queue index. + */ +static void +mlx5_flow_dump(struct rte_eth_dev *dev __rte_unused, + struct rte_flow *flow __rte_unused, + unsigned int hrxq_idx __rte_unused) +{ +#ifndef NDEBUG + uintptr_t spec_ptr; + uint16_t j; + char buf[256]; + uint8_t off; + + spec_ptr = (uintptr_t)(flow->frxq[hrxq_idx].ibv_attr + 1); + for (j = 0, off = 0; j < flow->frxq[hrxq_idx].ibv_attr->num_of_specs; + j++) { + struct ibv_flow_spec *spec = (void *)spec_ptr; + off += sprintf(buf + off, " %x(%hu)", spec->hdr.type, + spec->hdr.size); + spec_ptr += spec->hdr.size; + } + DRV_LOG(DEBUG, + "port %u Verbs flow %p type %u: hrxq:%p qp:%p ind:%p," + " hash:%" PRIx64 "/%u specs:%hhu(%hu), priority:%hu, type:%d," + " flags:%x, comp_mask:%x specs:%s", + dev->data->port_id, (void *)flow, hrxq_idx, + (void *)flow->frxq[hrxq_idx].hrxq, + (void *)flow->frxq[hrxq_idx].hrxq->qp, + (void *)flow->frxq[hrxq_idx].hrxq->ind_table, + flow->frxq[hrxq_idx].hash_fields | + (flow->tunnel && + flow->rss_conf.level > 1 ? 
(uint32_t)IBV_RX_HASH_INNER : 0), + flow->rss_conf.queue_num, + flow->frxq[hrxq_idx].ibv_attr->num_of_specs, + flow->frxq[hrxq_idx].ibv_attr->size, + flow->frxq[hrxq_idx].ibv_attr->priority, + flow->frxq[hrxq_idx].ibv_attr->type, + flow->frxq[hrxq_idx].ibv_attr->flags, + flow->frxq[hrxq_idx].ibv_attr->comp_mask, + buf); +#endif +} + /** * Complete flow rule creation. * @@ -2008,6 +2183,7 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); + mlx5_flow_dump(dev, flow, i); if (!flow->frxq[i].ibv_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, @@ -2015,23 +2191,13 @@ mlx5_flow_create_action_queue(struct rte_eth_dev *dev, goto error; } ++flows_n; - DRV_LOG(DEBUG, "port %u %p type %d QP %p ibv_flow %p", - dev->data->port_id, - (void *)flow, i, - (void *)flow->frxq[i].hrxq, - (void *)flow->frxq[i].ibv_flow); } if (!flows_n) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "internal error in flow creation"); goto error; } - for (i = 0; i != parser->rss_conf.queue_num; ++i) { - struct mlx5_rxq_data *q = - (*priv->rxqs)[parser->rss_conf.queue[i]]; - - q->mark |= parser->mark; - } + mlx5_flow_create_update_rxqs(dev, flow); return 0; error: ret = rte_errno; /* Save rte_errno before cleanup. */ @@ -2104,6 +2270,7 @@ mlx5_flow_list_create(struct rte_eth_dev *dev, } /* Copy configuration. */ flow->queues = (uint16_t (*)[])(flow + 1); + flow->tunnel = parser.tunnel; flow->rss_conf = (struct rte_flow_action_rss){ .func = RTE_ETH_HASH_FUNCTION_DEFAULT, .level = 0, @@ -2195,9 +2362,38 @@ mlx5_flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list, struct priv *priv = dev->data->dev_private; unsigned int i; - if (flow->drop || !flow->mark) + if (flow->drop || !dev->data->dev_started) goto free; - for (i = 0; i != flow->rss_conf.queue_num; ++i) { + for (i = 0; flow->tunnel && i != flow->rss_conf.queue_num; ++i) { + /* Update queue tunnel type. */ + struct mlx5_rxq_data *rxq_data = (*priv->rxqs) + [(*flow->queues)[i]]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(rxq_data, struct mlx5_rxq_ctrl, rxq); + uint8_t tunnel = PTYPE_IDX(flow->tunnel); + + assert(rxq_ctrl->tunnel_types[tunnel] > 0); + rxq_ctrl->tunnel_types[tunnel] -= 1; + if (!rxq_ctrl->tunnel_types[tunnel]) { + /* Update tunnel type. */ + uint8_t j; + uint8_t types = 0; + uint8_t last; + + for (j = 0; j < RTE_DIM(rxq_ctrl->tunnel_types); j++) + if (rxq_ctrl->tunnel_types[j]) { + types += 1; + last = j; + } + /* Keep same if more than one tunnel types left. */ + if (types == 1) + rxq_data->tunnel = ptype_ext[last]; + else if (types == 0) + /* No tunnel type left. */ + rxq_data->tunnel = 0; + } + } + for (i = 0; flow->mark && i != flow->rss_conf.queue_num; ++i) { struct rte_flow *tmp; int mark = 0; @@ -2416,9 +2612,9 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) { struct priv *priv = dev->data->dev_private; struct rte_flow *flow; + unsigned int i; TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) { - unsigned int i; struct mlx5_ind_table_ibv *ind_tbl = NULL; if (flow->drop) { @@ -2464,6 +2660,18 @@ mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list) DRV_LOG(DEBUG, "port %u flow %p removed", dev->data->port_id, (void *)flow); } + /* Cleanup Rx queue tunnel info. 
*/ + for (i = 0; i != priv->rxqs_n; ++i) { + struct mlx5_rxq_data *q = (*priv->rxqs)[i]; + struct mlx5_rxq_ctrl *rxq_ctrl = + container_of(q, struct mlx5_rxq_ctrl, rxq); + + if (!q) + continue; + memset((void *)rxq_ctrl->tunnel_types, 0, + sizeof(rxq_ctrl->tunnel_types)); + q->tunnel = 0; + } } /** @@ -2509,42 +2717,44 @@ mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list) flow->frxq[i].hrxq = mlx5_hrxq_get(dev, flow->rss_conf.key, flow->rss_conf.key_len, - hash_rxq_init[i].hash_fields, + flow->frxq[i].hash_fields, flow->rss_conf.queue, - flow->rss_conf.queue_num); + flow->rss_conf.queue_num, + flow->tunnel, + flow->rss_conf.level); if (flow->frxq[i].hrxq) goto flow_create; flow->frxq[i].hrxq = mlx5_hrxq_new(dev, flow->rss_conf.key, flow->rss_conf.key_len, - hash_rxq_init[i].hash_fields, + flow->frxq[i].hash_fields, flow->rss_conf.queue, - flow->rss_conf.queue_num); + flow->rss_conf.queue_num, + flow->tunnel, + flow->rss_conf.level); if (!flow->frxq[i].hrxq) { DRV_LOG(DEBUG, - "port %u flow %p cannot be applied", + "port %u flow %p cannot create hash" + " rxq", dev->data->port_id, (void *)flow); rte_errno = EINVAL; return -rte_errno; } flow_create: + mlx5_flow_dump(dev, flow, i); flow->frxq[i].ibv_flow = mlx5_glue->create_flow(flow->frxq[i].hrxq->qp, flow->frxq[i].ibv_attr); if (!flow->frxq[i].ibv_flow) { DRV_LOG(DEBUG, - "port %u flow %p cannot be applied", - dev->data->port_id, (void *)flow); + "port %u flow %p type %u cannot be" + " applied", + dev->data->port_id, (void *)flow, i); rte_errno = EINVAL; return -rte_errno; } - DRV_LOG(DEBUG, "port %u flow %p applied", - dev->data->port_id, (void *)flow); } - if (!flow->mark) - continue; - for (i = 0; i != flow->rss_conf.queue_num; ++i) - (*priv->rxqs)[flow->rss_conf.queue[i]]->mark = 1; + mlx5_flow_create_update_rxqs(dev, flow); } return 0; }
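
Illustrative usage (not part of the patch): a minimal rte_flow sketch of the inner-RSS behaviour this change enables on mlx5, assuming the DPDK 18.05-era rte_flow API. The function name, port/queue ids and hash types below are placeholder assumptions; only the RSS level semantics (1 = outer, 2 = inner, greater than 2 rejected, 0 = tunnel-specific default) come from the parser changes above.

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

/* Spread VXLAN traffic across Rx queues by hashing on the inner headers. */
static struct rte_flow *
vxlan_inner_rss(uint16_t port_id)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 }; /* assumed Rx queues */
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* tunnel item */
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.level = 2, /* hash on the inner (encapsulated) headers */
		.types = ETH_RSS_IP | ETH_RSS_UDP, /* placeholder hash types */
		.queue_num = RTE_DIM(queues),
		.queue = queues, /* NULL key/key_len = 0: PMD default RSS key */
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* Before this patch, mlx5 rejected any non-zero rss->level request. */
	return rte_flow_create(port_id, &attr, pattern, actions, &error);
}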