X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_verbs.c;h=459e7b64835dacfd89c2c59364775e9d49fe6d6a;hb=9c84acab47e8eee2736b70edc54f36bb1bc17c57;hp=43fcd0d29edd82a86dc9ecc3bda598e6e39261a8;hpb=4a78c88e3bae1f298eaed2793e0b7c9052a01c6b;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 43fcd0d29e..459e7b6483 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -20,18 +20,19 @@ #include #include -#include #include #include #include #include #include -#include "mlx5.h" +#include +#include + #include "mlx5_defs.h" -#include "mlx5_prm.h" -#include "mlx5_glue.h" +#include "mlx5.h" #include "mlx5_flow.h" +#include "mlx5_rxtx.h" #define VERBS_SPEC_INNER(item_flags) \ (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0) @@ -55,23 +56,26 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, struct mlx5_flow_counter *counter) { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; struct ibv_counter_set_init_attr init = { .counter_set_id = counter->id}; - counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init); + counter->cs = mlx5_glue->create_counter_set(ctx, &init); if (!counter->cs) { rte_errno = ENOTSUP; return -ENOTSUP; } return 0; #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; + struct ibv_context *ctx = priv->sh->ctx; struct ibv_counters_init_attr init = {0}; - struct ibv_counter_attach_attr attach = {0}; + struct ibv_counter_attach_attr attach; int ret; - counter->cs = mlx5_glue->create_counters(priv->ctx, &init); + memset(&attach, 0, sizeof(attach)); + counter->cs = mlx5_glue->create_counters(ctx, &init); if (!counter->cs) { rte_errno = ENOTSUP; return -ENOTSUP; @@ -116,17 +120,17 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, static struct mlx5_flow_counter * flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) { - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_counter *cnt; int ret; - LIST_FOREACH(cnt, &priv->flow_counters, next) { - if (!cnt->shared || cnt->shared != shared) - continue; - if (cnt->id != id) - continue; - cnt->ref_cnt++; - return cnt; + if (shared) { + TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) { + if (cnt->shared && cnt->id == id) { + cnt->ref_cnt++; + return cnt; + } + } } cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); if (!cnt) { @@ -141,7 +145,7 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) /* Create counter with Verbs. */ ret = flow_verbs_counter_create(dev, cnt); if (!ret) { - LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); + TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next); return cnt; } /* Some error occurred in Verbs library. */ @@ -153,19 +157,24 @@ flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) /** * Release a flow counter. * + * @param[in] dev + * Pointer to the Ethernet device structure. * @param[in] counter * Pointer to the counter handler. 
*/ static void -flow_verbs_counter_release(struct mlx5_flow_counter *counter) +flow_verbs_counter_release(struct rte_eth_dev *dev, + struct mlx5_flow_counter *counter) { + struct mlx5_priv *priv = dev->data->dev_private; + if (--counter->ref_cnt == 0) { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) claim_zero(mlx5_glue->destroy_counter_set(counter->cs)); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) claim_zero(mlx5_glue->destroy_counters(counter->cs)); #endif - LIST_REMOVE(counter, next); + TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next); rte_free(counter); } } @@ -183,7 +192,7 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - if (flow->actions & MLX5_FLOW_ACTION_COUNT) { + if (flow->counter && flow->counter->cs) { struct rte_flow_query_count *qc = data; uint64_t counters[2] = {0, 0}; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) @@ -250,7 +259,7 @@ flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size) if (!verbs) return; - assert(verbs->specs); + MLX5_ASSERT(verbs->specs); dst = (void *)(verbs->specs + verbs->size); memcpy(dst, src, size); ++verbs->attr->num_of_specs; @@ -287,14 +296,18 @@ flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow, if (spec) { unsigned int i; - memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN); - memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN); + memcpy(ð.val.dst_mac, spec->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(ð.val.src_mac, spec->src.addr_bytes, + RTE_ETHER_ADDR_LEN); eth.val.ether_type = spec->type; - memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN); - memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN); + memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, + RTE_ETHER_ADDR_LEN); + memcpy(ð.mask.src_mac, mask->src.addr_bytes, + RTE_ETHER_ADDR_LEN); eth.mask.ether_type = mask->type; /* Remove unwanted bits from values. 
*/ - for (i = 0; i < ETHER_ADDR_LEN; ++i) { + for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) { eth.val.dst_mac[i] &= eth.mask.dst_mac[i]; eth.val.src_mac[i] &= eth.mask.src_mac[i]; } @@ -379,6 +392,9 @@ flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow, flow_verbs_spec_add(&dev_flow->verbs, ð, size); else flow_verbs_item_vlan_update(dev_flow->verbs.attr, ð); + if (!tunnel) + dev_flow->verbs.vf_vlan.tag = + rte_be_to_cpu_16(spec->tci) & 0x0fff; } /** @@ -473,19 +489,17 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow); vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow); ipv6.val.flow_label = - rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >> - IPV6_HDR_FL_SHIFT); - ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >> - IPV6_HDR_TC_SHIFT; + rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >> + RTE_IPV6_HDR_FL_SHIFT); + ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; ipv6.val.next_hdr = spec->hdr.proto; - ipv6.val.hop_limit = spec->hdr.hop_limits; ipv6.mask.flow_label = - rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >> - IPV6_HDR_FL_SHIFT); - ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >> - IPV6_HDR_TC_SHIFT; + rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >> + RTE_IPV6_HDR_FL_SHIFT); + ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >> + RTE_IPV6_HDR_TC_SHIFT; ipv6.mask.next_hdr = mask->hdr.proto; - ipv6.mask.hop_limit = mask->hdr.hop_limits; /* Remove unwanted bits from values. */ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) { ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i]; @@ -494,7 +508,6 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, ipv6.val.flow_label &= ipv6.mask.flow_label; ipv6.val.traffic_class &= ipv6.mask.traffic_class; ipv6.val.next_hdr &= ipv6.mask.next_hdr; - ipv6.val.hop_limit &= ipv6.mask.hop_limit; } flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size); } @@ -849,8 +862,8 @@ flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow, const struct rte_flow_action_queue *queue = action->conf; struct rte_flow *flow = dev_flow->flow; - if (flow->queue) - (*flow->queue)[0] = queue->index; + if (flow->rss.queue) + (*flow->rss.queue)[0] = queue->index; flow->rss.queue_num = 1; } @@ -874,16 +887,17 @@ flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow, const uint8_t *rss_key; struct rte_flow *flow = dev_flow->flow; - if (flow->queue) - memcpy((*flow->queue), rss->queue, + if (flow->rss.queue) + memcpy((*flow->rss.queue), rss->queue, rss->queue_num * sizeof(uint16_t)); flow->rss.queue_num = rss->queue_num; /* NULL RSS key indicates default RSS key. */ rss_key = !rss->key ? rss_hash_default_key : rss->key; - memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN); - /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ - flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; - flow->rss.level = rss->level; + memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN); + /* + * rss->level and rss.types should be set in advance when expanding + * items for RSS. + */ } /** @@ -1001,6 +1015,8 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. * @param[out] error * Pointer to the error structure. 
* @@ -1012,12 +1028,15 @@ flow_verbs_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], + bool external __rte_unused, struct rte_flow_error *error) { int ret; uint64_t action_flags = 0; uint64_t item_flags = 0; + uint64_t last_item = 0; uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; if (items == NULL) return -1; @@ -1036,46 +1055,86 @@ flow_verbs_validate(struct rte_eth_dev *dev, error); if (ret < 0) return ret; - item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : + MLX5_FLOW_LAYER_OUTER_L2; + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_eth *) + items->spec)->type; + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } break; case RTE_FLOW_ITEM_TYPE_VLAN: ret = mlx5_flow_validate_item_vlan(items, item_flags, - error); + dev, error); if (ret < 0) return ret; - item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | - MLX5_FLOW_LAYER_INNER_VLAN) : - (MLX5_FLOW_LAYER_OUTER_L2 | - MLX5_FLOW_LAYER_OUTER_VLAN); + last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | + MLX5_FLOW_LAYER_INNER_VLAN) : + (MLX5_FLOW_LAYER_OUTER_L2 | + MLX5_FLOW_LAYER_OUTER_VLAN); + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_vlan *) + items->spec)->inner_type; + ether_type &= + ((const struct rte_flow_item_vlan *) + items->mask)->inner_type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } break; case RTE_FLOW_ITEM_TYPE_IPV4: ret = mlx5_flow_validate_item_ipv4(items, item_flags, + last_item, + ether_type, NULL, error); if (ret < 0) return ret; - item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : + MLX5_FLOW_LAYER_OUTER_L3_IPV4; if (items->mask != NULL && ((const struct rte_flow_item_ipv4 *) - items->mask)->hdr.next_proto_id) + items->mask)->hdr.next_proto_id) { next_protocol = ((const struct rte_flow_item_ipv4 *) (items->spec))->hdr.next_proto_id; + next_protocol &= + ((const struct rte_flow_item_ipv4 *) + (items->mask))->hdr.next_proto_id; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } break; case RTE_FLOW_ITEM_TYPE_IPV6: ret = mlx5_flow_validate_item_ipv6(items, item_flags, + last_item, + ether_type, NULL, error); if (ret < 0) return ret; - item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : + MLX5_FLOW_LAYER_OUTER_L3_IPV6; if (items->mask != NULL && ((const struct rte_flow_item_ipv6 *) - items->mask)->hdr.proto) + items->mask)->hdr.proto) { next_protocol = ((const struct rte_flow_item_ipv6 *) items->spec)->hdr.proto; + next_protocol &= + ((const struct rte_flow_item_ipv6 *) + items->mask)->hdr.proto; + } else { + /* Reset for inner layer. */ + next_protocol = 0xff; + } break; case RTE_FLOW_ITEM_TYPE_UDP: ret = mlx5_flow_validate_item_udp(items, item_flags, @@ -1083,8 +1142,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, error); if (ret < 0) return ret; - item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : - MLX5_FLOW_LAYER_OUTER_L4_UDP; + last_item = tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_UDP : + MLX5_FLOW_LAYER_OUTER_L4_UDP; break; case RTE_FLOW_ITEM_TYPE_TCP: ret = mlx5_flow_validate_item_tcp @@ -1094,15 +1153,15 @@ flow_verbs_validate(struct rte_eth_dev *dev, error); if (ret < 0) return ret; - item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : - MLX5_FLOW_LAYER_OUTER_L4_TCP; + last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP : + MLX5_FLOW_LAYER_OUTER_L4_TCP; break; case RTE_FLOW_ITEM_TYPE_VXLAN: ret = mlx5_flow_validate_item_vxlan(items, item_flags, error); if (ret < 0) return ret; - item_flags |= MLX5_FLOW_LAYER_VXLAN; + last_item = MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: ret = mlx5_flow_validate_item_vxlan_gpe(items, @@ -1110,35 +1169,29 @@ flow_verbs_validate(struct rte_eth_dev *dev, dev, error); if (ret < 0) return ret; - item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; + last_item = MLX5_FLOW_LAYER_VXLAN_GPE; break; case RTE_FLOW_ITEM_TYPE_GRE: ret = mlx5_flow_validate_item_gre(items, item_flags, next_protocol, error); if (ret < 0) return ret; - item_flags |= MLX5_FLOW_LAYER_GRE; + last_item = MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_MPLS: - ret = mlx5_flow_validate_item_mpls(items, item_flags, - next_protocol, - error); + ret = mlx5_flow_validate_item_mpls(dev, items, + item_flags, + last_item, error); if (ret < 0) return ret; - if (next_protocol != 0xff && - next_protocol != IPPROTO_MPLS) - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_ITEM, items, - "protocol filtering not compatible" - " with MPLS layer"); - item_flags |= MLX5_FLOW_LAYER_MPLS; + last_item = MLX5_FLOW_LAYER_MPLS; break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "item not supported"); } + item_flags |= last_item; } for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -1181,7 +1234,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_RSS: ret = mlx5_flow_validate_action_rss(actions, action_flags, dev, - attr, + attr, item_flags, error); if (ret < 0) return ret; @@ -1200,6 +1253,18 @@ flow_verbs_validate(struct rte_eth_dev *dev, "action not supported"); } } + /* + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for + * Count action. + */ + if ((action_flags & MLX5_FLOW_ACTION_DROP) && + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Drop action is mutually-exclusive " + "with any other action, except for " + "Count action"); if (!(action_flags & MLX5_FLOW_FATE_ACTIONS)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -1209,23 +1274,18 @@ flow_verbs_validate(struct rte_eth_dev *dev, /** * Calculate the required bytes that are needed for the action part of the verbs - * flow, in addtion returns bit-fields with all the detected action, in order to - * avoid another interation over the actions. + * flow. * * @param[in] actions * Pointer to the list of actions. - * @param[out] action_flags - * Pointer to the detected actions. * * @return * The size of the memory needed for all actions. 
*/ static int -flow_verbs_get_actions_and_size(const struct rte_flow_action actions[], - uint64_t *action_flags) +flow_verbs_get_actions_size(const struct rte_flow_action actions[]) { int size = 0; - uint64_t detected_actions = 0; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -1233,128 +1293,89 @@ flow_verbs_get_actions_and_size(const struct rte_flow_action actions[], break; case RTE_FLOW_ACTION_TYPE_FLAG: size += sizeof(struct ibv_flow_spec_action_tag); - detected_actions |= MLX5_FLOW_ACTION_FLAG; break; case RTE_FLOW_ACTION_TYPE_MARK: size += sizeof(struct ibv_flow_spec_action_tag); - detected_actions |= MLX5_FLOW_ACTION_MARK; break; case RTE_FLOW_ACTION_TYPE_DROP: size += sizeof(struct ibv_flow_spec_action_drop); - detected_actions |= MLX5_FLOW_ACTION_DROP; break; case RTE_FLOW_ACTION_TYPE_QUEUE: - detected_actions |= MLX5_FLOW_ACTION_QUEUE; break; case RTE_FLOW_ACTION_TYPE_RSS: - detected_actions |= MLX5_FLOW_ACTION_RSS; break; case RTE_FLOW_ACTION_TYPE_COUNT: #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) size += sizeof(struct ibv_flow_spec_counter_action); #endif - detected_actions |= MLX5_FLOW_ACTION_COUNT; break; default: break; } } - *action_flags = detected_actions; return size; } /** * Calculate the required bytes that are needed for the item part of the verbs - * flow, in addtion returns bit-fields with all the detected action, in order to - * avoid another interation over the actions. + * flow. * - * @param[in] actions + * @param[in] items * Pointer to the list of items. - * @param[in, out] item_flags - * Pointer to the detected items. * * @return * The size of the memory needed for all items. */ static int -flow_verbs_get_items_and_size(const struct rte_flow_item items[], - uint64_t *item_flags) +flow_verbs_get_items_size(const struct rte_flow_item items[]) { int size = 0; - uint64_t detected_items = 0; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { - int tunnel = !!(detected_items & MLX5_FLOW_LAYER_TUNNEL); - switch (items->type) { case RTE_FLOW_ITEM_TYPE_VOID: break; case RTE_FLOW_ITEM_TYPE_ETH: size += sizeof(struct ibv_flow_spec_eth); - detected_items |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 : - MLX5_FLOW_LAYER_OUTER_L2; break; case RTE_FLOW_ITEM_TYPE_VLAN: size += sizeof(struct ibv_flow_spec_eth); - detected_items |= - tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | - MLX5_FLOW_LAYER_INNER_VLAN) : - (MLX5_FLOW_LAYER_OUTER_L2 | - MLX5_FLOW_LAYER_OUTER_VLAN); break; case RTE_FLOW_ITEM_TYPE_IPV4: size += sizeof(struct ibv_flow_spec_ipv4_ext); - detected_items |= tunnel ? - MLX5_FLOW_LAYER_INNER_L3_IPV4 : - MLX5_FLOW_LAYER_OUTER_L3_IPV4; break; case RTE_FLOW_ITEM_TYPE_IPV6: size += sizeof(struct ibv_flow_spec_ipv6); - detected_items |= tunnel ? - MLX5_FLOW_LAYER_INNER_L3_IPV6 : - MLX5_FLOW_LAYER_OUTER_L3_IPV6; break; case RTE_FLOW_ITEM_TYPE_UDP: size += sizeof(struct ibv_flow_spec_tcp_udp); - detected_items |= tunnel ? - MLX5_FLOW_LAYER_INNER_L4_UDP : - MLX5_FLOW_LAYER_OUTER_L4_UDP; break; case RTE_FLOW_ITEM_TYPE_TCP: size += sizeof(struct ibv_flow_spec_tcp_udp); - detected_items |= tunnel ? 
- MLX5_FLOW_LAYER_INNER_L4_TCP : - MLX5_FLOW_LAYER_OUTER_L4_TCP; break; case RTE_FLOW_ITEM_TYPE_VXLAN: size += sizeof(struct ibv_flow_spec_tunnel); - detected_items |= MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: size += sizeof(struct ibv_flow_spec_tunnel); - detected_items |= MLX5_FLOW_LAYER_VXLAN_GPE; break; #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT case RTE_FLOW_ITEM_TYPE_GRE: size += sizeof(struct ibv_flow_spec_gre); - detected_items |= MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_MPLS: size += sizeof(struct ibv_flow_spec_mpls); - detected_items |= MLX5_FLOW_LAYER_MPLS; break; #else case RTE_FLOW_ITEM_TYPE_GRE: size += sizeof(struct ibv_flow_spec_tunnel); - detected_items |= MLX5_FLOW_LAYER_TUNNEL; break; #endif default: break; } } - *item_flags = detected_items; return size; } @@ -1369,10 +1390,6 @@ flow_verbs_get_items_and_size(const struct rte_flow_item items[], * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. - * @param[out] item_flags - * Pointer to bit mask of all items detected. - * @param[out] action_flags - * Pointer to bit mask of all actions detected. * @param[out] error * Pointer to the error structure. * @@ -1384,26 +1401,25 @@ static struct mlx5_flow * flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[], const struct rte_flow_action actions[], - uint64_t *item_flags, - uint64_t *action_flags, struct rte_flow_error *error) { - uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr); - struct mlx5_flow *flow; + size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr); + struct mlx5_flow *dev_flow; - size += flow_verbs_get_actions_and_size(actions, action_flags); - size += flow_verbs_get_items_and_size(items, item_flags); - flow = rte_calloc(__func__, 1, size, 0); - if (!flow) { + size += flow_verbs_get_actions_size(actions); + size += flow_verbs_get_items_size(items); + dev_flow = rte_calloc(__func__, 1, size, 0); + if (!dev_flow) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "not enough memory to create flow"); return NULL; } - flow->verbs.attr = (void *)(flow + 1); - flow->verbs.specs = - (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr); - return flow; + dev_flow->verbs.attr = (void *)(dev_flow + 1); + dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1); + dev_flow->ingress = attr->ingress; + dev_flow->transfer = attr->transfer; + return dev_flow; } /** @@ -1423,7 +1439,7 @@ flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused, * Pointer to the error structure. * * @return - * 0 on success, else a negative errno value otherwise and rte_ernno is set. + * 0 on success, else a negative errno value otherwise and rte_errno is set. 
*/ static int flow_verbs_translate(struct rte_eth_dev *dev, @@ -1433,12 +1449,11 @@ flow_verbs_translate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct rte_flow *flow = dev_flow->flow; uint64_t item_flags = 0; uint64_t action_flags = 0; uint64_t priority = attr->priority; uint32_t subpriority = 0; - struct priv *priv = dev->data->dev_private; + struct mlx5_priv *priv = dev->data->dev_private; if (priority == MLX5_FLOW_PRIO_RSVD) priority = priv->config.flow_prio - 1; @@ -1483,7 +1498,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, "action not supported"); } } - flow->actions = action_flags; + dev_flow->actions = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1510,7 +1525,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_ipv4(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L3; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust (dev_flow, tunnel, MLX5_IPV4_LAYER_TYPES, @@ -1522,7 +1537,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_ipv6(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L3; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust (dev_flow, tunnel, MLX5_IPV6_LAYER_TYPES, @@ -1534,7 +1549,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_tcp(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L4; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust (dev_flow, tunnel, ETH_RSS_TCP, (IBV_RX_HASH_SRC_PORT_TCP | @@ -1546,7 +1561,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_udp(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L4; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust (dev_flow, tunnel, ETH_RSS_UDP, (IBV_RX_HASH_SRC_PORT_UDP | @@ -1588,6 +1603,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, dev_flow->layers = item_flags; dev_flow->verbs.attr->priority = mlx5_flow_adjust_priority(dev, priority, subpriority); + dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port; return 0; } @@ -1614,12 +1630,16 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) verbs->flow = NULL; } if (verbs->hrxq) { - if (flow->actions & MLX5_FLOW_ACTION_DROP) + if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else mlx5_hrxq_release(dev, verbs->hrxq); verbs->hrxq = NULL; } + if (dev_flow->verbs.vf_vlan.tag && + dev_flow->verbs.vf_vlan.created) { + mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan); + } } } @@ -1645,7 +1665,7 @@ flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) rte_free(dev_flow); } if (flow->counter) { - flow_verbs_counter_release(flow->counter); + flow_verbs_counter_release(dev, flow->counter); flow->counter = NULL; } } @@ -1667,13 +1687,14 @@ static int flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { + struct mlx5_priv *priv = dev->data->dev_private; struct mlx5_flow_verbs *verbs; struct mlx5_flow *dev_flow; int err; LIST_FOREACH(dev_flow, &flow->dev_flows, next) { verbs = &dev_flow->verbs; - if (flow->actions & MLX5_FLOW_ACTION_DROP) { + if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) { verbs->hrxq = mlx5_hrxq_drop_new(dev); if (!verbs->hrxq) { rte_flow_error_set @@ -1685,19 +1706,20 @@ flow_verbs_apply(struct rte_eth_dev *dev, 
struct rte_flow *flow, } else { struct mlx5_hrxq *hrxq; - hrxq = mlx5_hrxq_get(dev, flow->key, + MLX5_ASSERT(flow->rss.queue); + hrxq = mlx5_hrxq_get(dev, flow->rss.key, MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), + dev_flow->hash_fields, + (*flow->rss.queue), flow->rss.queue_num); if (!hrxq) - hrxq = mlx5_hrxq_new(dev, flow->key, + hrxq = mlx5_hrxq_new(dev, flow->rss.key, MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), + dev_flow->hash_fields, + (*flow->rss.queue), flow->rss.queue_num, !!(dev_flow->layers & - MLX5_FLOW_LAYER_TUNNEL)); + MLX5_FLOW_LAYER_TUNNEL)); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -1716,6 +1738,17 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "hardware refuses to create flow"); goto error; } + if (priv->vmwa_context && + dev_flow->verbs.vf_vlan.tag && + !dev_flow->verbs.vf_vlan.created) { + /* + * The rule contains the VLAN pattern. + * For VF we are going to create VLAN + * interface to make hypervisor set correct + * e-Switch vport context. + */ + mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan); + } } return 0; error: @@ -1723,12 +1756,16 @@ error: LIST_FOREACH(dev_flow, &flow->dev_flows, next) { verbs = &dev_flow->verbs; if (verbs->hrxq) { - if (flow->actions & MLX5_FLOW_ACTION_DROP) + if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) mlx5_hrxq_drop_release(dev); else mlx5_hrxq_release(dev, verbs->hrxq); verbs->hrxq = NULL; } + if (dev_flow->verbs.vf_vlan.tag && + dev_flow->verbs.vf_vlan.created) { + mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan); + } } rte_errno = err; /* Restore rte_errno. */ return -rte_errno;
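
For reference, a minimal standalone sketch (not part of the patch) of how the new drop-action exclusivity check added to flow_verbs_validate() evaluates, assuming the MLX5_FLOW_ACTION_* macros are the single-bit flags defined in mlx5_flow.h:

	/* Hypothetical illustration only, not driver code. */
	uint64_t action_flags = MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT;
	int bad = (action_flags & MLX5_FLOW_ACTION_DROP) &&
		  (action_flags & ~(MLX5_FLOW_ACTION_DROP |
				    MLX5_FLOW_ACTION_COUNT));
	/* bad == 0 here: DROP combined only with COUNT passes validation.
	 * Setting any other action bit (e.g. MLX5_FLOW_ACTION_QUEUE) makes
	 * the second term non-zero and the validation path above returns
	 * -EINVAL via rte_flow_error_set().
	 */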