X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_verbs.c;h=59291fbd09a2d0fce72bd6846d248eba8911fbc8;hb=f9210259cac7;hp=2f4c80cb074b4156d4193fe9518d845e76aaefe2;hpb=24ac604ef7469eb5773c2504b313dd00257f8df3;p=dpdk.git diff --git a/drivers/net/mlx5/mlx5_flow_verbs.c b/drivers/net/mlx5/mlx5_flow_verbs.c index 2f4c80cb07..59291fbd09 100644 --- a/drivers/net/mlx5/mlx5_flow_verbs.c +++ b/drivers/net/mlx5/mlx5_flow_verbs.c @@ -8,16 +8,6 @@ #include #include -/* Verbs header. */ -/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ -#ifdef PEDANTIC -#pragma GCC diagnostic ignored "-Wpedantic" -#endif -#include -#ifdef PEDANTIC -#pragma GCC diagnostic error "-Wpedantic" -#endif - #include #include #include @@ -26,16 +16,159 @@ #include #include -#include "mlx5.h" +#include +#include +#include + #include "mlx5_defs.h" +#include "mlx5.h" #include "mlx5_flow.h" -#include "mlx5_glue.h" -#include "mlx5_prm.h" #include "mlx5_rxtx.h" #define VERBS_SPEC_INNER(item_flags) \ (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0) +/* Map of Verbs to Flow priority with 8 Verbs priorities. */ +static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 }, +}; + +/* Map of Verbs to Flow priority with 16 Verbs priorities. */ +static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = { + { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 }, + { 9, 10, 11 }, { 12, 13, 14 }, +}; + +/** + * Discover the maximum number of priority available. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * + * @return + * number of supported flow priority on success, a negative errno + * value otherwise and rte_errno is set. + */ +int +mlx5_flow_discover_priorities(struct rte_eth_dev *dev) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct { + struct ibv_flow_attr attr; + struct ibv_flow_spec_eth eth; + struct ibv_flow_spec_action_drop drop; + } flow_attr = { + .attr = { + .num_of_specs = 2, + .port = (uint8_t)priv->dev_port, + }, + .eth = { + .type = IBV_FLOW_SPEC_ETH, + .size = sizeof(struct ibv_flow_spec_eth), + }, + .drop = { + .size = sizeof(struct ibv_flow_spec_action_drop), + .type = IBV_FLOW_SPEC_ACTION_DROP, + }, + }; + struct ibv_flow *flow; + struct mlx5_hrxq *drop = priv->drop_queue.hrxq; + uint16_t vprio[] = { 8, 16 }; + int i; + int priority = 0; + + if (!drop->qp) { + rte_errno = ENOTSUP; + return -rte_errno; + } + for (i = 0; i != RTE_DIM(vprio); i++) { + flow_attr.attr.priority = vprio[i] - 1; + flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr); + if (!flow) + break; + claim_zero(mlx5_glue->destroy_flow(flow)); + priority = vprio[i]; + } + switch (priority) { + case 8: + priority = RTE_DIM(priority_map_3); + break; + case 16: + priority = RTE_DIM(priority_map_5); + break; + default: + rte_errno = ENOTSUP; + DRV_LOG(ERR, + "port %u verbs maximum priority: %d expected 8/16", + dev->data->port_id, priority); + return -rte_errno; + } + DRV_LOG(INFO, "port %u flow maximum priority: %d", + dev->data->port_id, priority); + return priority; +} + +/** + * Adjust flow priority based on the highest layer and the request priority. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] priority + * The rule base priority. + * @param[in] subpriority + * The priority based on the items. + * + * @return + * The new priority. 
+ */ +uint32_t +mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, + uint32_t subpriority) +{ + uint32_t res = 0; + struct mlx5_priv *priv = dev->data->dev_private; + + switch (priv->config.flow_prio) { + case RTE_DIM(priority_map_3): + res = priority_map_3[priority][subpriority]; + break; + case RTE_DIM(priority_map_5): + res = priority_map_5[priority][subpriority]; + break; + } + return res; +} + +/** + * Get Verbs flow counter by index. + * + * @param[in] dev + * Pointer to the Ethernet device structure. + * @param[in] idx + * mlx5 flow counter index in the container. + * @param[out] ppool + * mlx5 flow counter pool in the container, + * + * @return + * A pointer to the counter, NULL otherwise. + */ +static struct mlx5_flow_counter * +flow_verbs_counter_get_by_idx(struct rte_eth_dev *dev, + uint32_t idx, + struct mlx5_flow_counter_pool **ppool) +{ + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + struct mlx5_flow_counter_pool *pool; + + idx = (idx - 1) & (MLX5_CNT_SHARED_OFFSET - 1); + pool = cmng->pools[idx / MLX5_COUNTERS_PER_POOL]; + MLX5_ASSERT(pool); + if (ppool) + *ppool = pool; + return MLX5_POOL_GET_CNT(pool, idx % MLX5_COUNTERS_PER_POOL); +} + /** * Create Verbs flow counter with Verbs library. * @@ -58,10 +191,10 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, struct mlx5_priv *priv = dev->data->dev_private; struct ibv_context *ctx = priv->sh->ctx; struct ibv_counter_set_init_attr init = { - .counter_set_id = counter->id}; + .counter_set_id = counter->shared_info.id}; - counter->cs = mlx5_glue->create_counter_set(ctx, &init); - if (!counter->cs) { + counter->dcs_when_free = mlx5_glue->create_counter_set(ctx, &init); + if (!counter->dcs_when_free) { rte_errno = ENOTSUP; return -ENOTSUP; } @@ -74,23 +207,23 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, int ret; memset(&attach, 0, sizeof(attach)); - counter->cs = mlx5_glue->create_counters(ctx, &init); - if (!counter->cs) { + counter->dcs_when_free = mlx5_glue->create_counters(ctx, &init); + if (!counter->dcs_when_free) { rte_errno = ENOTSUP; return -ENOTSUP; } attach.counter_desc = IBV_COUNTER_PACKETS; attach.index = 0; - ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL); + ret = mlx5_glue->attach_counters(counter->dcs_when_free, &attach, NULL); if (!ret) { attach.counter_desc = IBV_COUNTER_BYTES; attach.index = 1; ret = mlx5_glue->attach_counters - (counter->cs, &attach, NULL); + (counter->dcs_when_free, &attach, NULL); } if (ret) { - claim_zero(mlx5_glue->destroy_counters(counter->cs)); - counter->cs = NULL; + claim_zero(mlx5_glue->destroy_counters(counter->dcs_when_free)); + counter->dcs_when_free = NULL; rte_errno = ret; return -ret; } @@ -114,63 +247,118 @@ flow_verbs_counter_create(struct rte_eth_dev *dev, * Counter identifier. * * @return - * A pointer to the counter, NULL otherwise and rte_errno is set. + * Index to the counter, 0 otherwise and rte_errno is set. 
*/ -static struct mlx5_flow_counter * +static uint32_t flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id) { struct mlx5_priv *priv = dev->data->dev_private; - struct mlx5_flow_counter *cnt; + struct mlx5_flow_counter_mng *cmng = &priv->sh->cmng; + struct mlx5_flow_counter_pool *pool = NULL; + struct mlx5_flow_counter *cnt = NULL; + union mlx5_l3t_data data; + uint32_t n_valid = cmng->n_valid; + uint32_t pool_idx, cnt_idx; + uint32_t i; int ret; - if (shared) { - LIST_FOREACH(cnt, &priv->flow_counters, next) { - if (cnt->shared && cnt->id == id) { - cnt->ref_cnt++; - return cnt; + if (shared && !mlx5_l3t_get_entry(priv->sh->cnt_id_tbl, id, &data) && + data.dword) + return data.dword; + for (pool_idx = 0; pool_idx < n_valid; ++pool_idx) { + pool = cmng->pools[pool_idx]; + if (!pool) + continue; + cnt = TAILQ_FIRST(&pool->counters[0]); + if (cnt) + break; + } + if (!cnt) { + struct mlx5_flow_counter_pool **pools; + uint32_t size; + + if (n_valid == cmng->n) { + /* Resize the container pool array. */ + size = sizeof(struct mlx5_flow_counter_pool *) * + (n_valid + MLX5_CNT_CONTAINER_RESIZE); + pools = mlx5_malloc(MLX5_MEM_ZERO, size, 0, + SOCKET_ID_ANY); + if (!pools) + return 0; + if (n_valid) { + memcpy(pools, cmng->pools, + sizeof(struct mlx5_flow_counter_pool *) * + n_valid); + mlx5_free(cmng->pools); } + cmng->pools = pools; + cmng->n += MLX5_CNT_CONTAINER_RESIZE; } + /* Allocate memory for new pool*/ + size = sizeof(*pool) + sizeof(*cnt) * MLX5_COUNTERS_PER_POOL; + pool = mlx5_malloc(MLX5_MEM_ZERO, size, 0, SOCKET_ID_ANY); + if (!pool) + return 0; + for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) { + cnt = MLX5_POOL_GET_CNT(pool, i); + TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next); + } + cnt = MLX5_POOL_GET_CNT(pool, 0); + cmng->pools[n_valid] = pool; + pool_idx = n_valid; + cmng->n_valid++; } - cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0); - if (!cnt) { - rte_errno = ENOMEM; - return NULL; + TAILQ_REMOVE(&pool->counters[0], cnt, next); + i = MLX5_CNT_ARRAY_IDX(pool, cnt); + cnt_idx = MLX5_MAKE_CNT_IDX(pool_idx, i); + if (shared) { + data.dword = cnt_idx; + if (mlx5_l3t_set_entry(priv->sh->cnt_id_tbl, id, &data)) + return 0; + cnt->shared_info.id = id; + cnt_idx |= MLX5_CNT_SHARED_OFFSET; } - cnt->id = id; - cnt->shared = shared; - cnt->ref_cnt = 1; - cnt->hits = 0; - cnt->bytes = 0; /* Create counter with Verbs. */ ret = flow_verbs_counter_create(dev, cnt); if (!ret) { - LIST_INSERT_HEAD(&priv->flow_counters, cnt, next); - return cnt; + cnt->dcs_when_active = cnt->dcs_when_free; + cnt->hits = 0; + cnt->bytes = 0; + return cnt_idx; } + TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next); /* Some error occurred in Verbs library. */ - rte_free(cnt); rte_errno = -ret; - return NULL; + return 0; } /** * Release a flow counter. * + * @param[in] dev + * Pointer to the Ethernet device structure. * @param[in] counter - * Pointer to the counter handler. + * Index to the counter handler. 
*/ static void -flow_verbs_counter_release(struct mlx5_flow_counter *counter) +flow_verbs_counter_release(struct rte_eth_dev *dev, uint32_t counter) { - if (--counter->ref_cnt == 0) { + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt; + + cnt = flow_verbs_counter_get_by_idx(dev, counter, &pool); + if (IS_SHARED_CNT(counter) && + mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id)) + return; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) - claim_zero(mlx5_glue->destroy_counter_set(counter->cs)); + claim_zero(mlx5_glue->destroy_counter_set + ((struct ibv_counter_set *)cnt->dcs_when_active)); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - claim_zero(mlx5_glue->destroy_counters(counter->cs)); + claim_zero(mlx5_glue->destroy_counters + ((struct ibv_counters *)cnt->dcs_when_active)); #endif - LIST_REMOVE(counter, next); - rte_free(counter); - } + TAILQ_INSERT_HEAD(&pool->counters[0], cnt, next); } /** @@ -186,12 +374,16 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, { #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - if (flow->actions & MLX5_FLOW_ACTION_COUNT) { + if (flow->counter) { + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt = flow_verbs_counter_get_by_idx + (dev, flow->counter, &pool); struct rte_flow_query_count *qc = data; uint64_t counters[2] = {0, 0}; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) struct ibv_query_counter_set_attr query_cs_attr = { - .cs = flow->counter->cs, + .dcs_when_free = (struct ibv_counter_set *) + cnt->dcs_when_active, .query_flags = IBV_COUNTER_SET_FORCE_UPDATE, }; struct ibv_counter_set_data query_out = { @@ -202,7 +394,7 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, &query_out); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) int err = mlx5_glue->query_counters - (flow->counter->cs, counters, + ((struct ibv_counters *)cnt->dcs_when_active, counters, RTE_DIM(counters), IBV_READ_COUNTERS_ATTR_PREFER_CACHED); #endif @@ -214,11 +406,11 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, "cannot read counter"); qc->hits_set = 1; qc->bytes_set = 1; - qc->hits = counters[0] - flow->counter->hits; - qc->bytes = counters[1] - flow->counter->bytes; + qc->hits = counters[0] - cnt->hits; + qc->bytes = counters[1] - cnt->bytes; if (qc->reset) { - flow->counter->hits = counters[0]; - flow->counter->bytes = counters[1]; + cnt->hits = counters[0]; + cnt->bytes = counters[1]; } return 0; } @@ -247,16 +439,17 @@ flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused, * Size in bytes of the specification to copy. 
*/ static void -flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size) +flow_verbs_spec_add(struct mlx5_flow_verbs_workspace *verbs, + void *src, unsigned int size) { void *dst; if (!verbs) return; - assert(verbs->specs); + MLX5_ASSERT(verbs->specs); dst = (void *)(verbs->specs + verbs->size); memcpy(dst, src, size); - ++verbs->attr->num_of_specs; + ++verbs->attr.num_of_specs; verbs->size += size; } @@ -385,7 +578,10 @@ flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow, if (!(item_flags & l2m)) flow_verbs_spec_add(&dev_flow->verbs, ð, size); else - flow_verbs_item_vlan_update(dev_flow->verbs.attr, ð); + flow_verbs_item_vlan_update(&dev_flow->verbs.attr, ð); + if (!tunnel) + dev_flow->handle->vf_vlan.tag = + rte_be_to_cpu_16(spec->tci) & 0x0fff; } /** @@ -485,14 +681,12 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; ipv6.val.next_hdr = spec->hdr.proto; - ipv6.val.hop_limit = spec->hdr.hop_limits; ipv6.mask.flow_label = rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >> RTE_IPV6_HDR_FL_SHIFT); ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; ipv6.mask.next_hdr = mask->hdr.proto; - ipv6.mask.hop_limit = mask->hdr.hop_limits; /* Remove unwanted bits from values. */ for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) { ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i]; @@ -501,7 +695,6 @@ flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow, ipv6.val.flow_label &= ipv6.mask.flow_label; ipv6.val.traffic_class &= ipv6.mask.traffic_class; ipv6.val.next_hdr &= ipv6.mask.next_hdr; - ipv6.val.hop_limit &= ipv6.mask.hop_limit; } flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size); } @@ -581,6 +774,28 @@ flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow, udp.val.src_port &= udp.mask.src_port; udp.val.dst_port &= udp.mask.dst_port; } + item++; + while (item->type == RTE_FLOW_ITEM_TYPE_VOID) + item++; + if (!(udp.val.dst_port & udp.mask.dst_port)) { + switch ((item)->type) { + case RTE_FLOW_ITEM_TYPE_VXLAN: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: + udp.val.dst_port = htons(MLX5_UDP_PORT_VXLAN_GPE); + udp.mask.dst_port = 0xffff; + break; + case RTE_FLOW_ITEM_TYPE_MPLS: + udp.val.dst_port = htons(MLX5_UDP_PORT_MPLS); + udp.mask.dst_port = 0xffff; + break; + default: + break; + } + } + flow_verbs_spec_add(&dev_flow->verbs, &udp, size); } @@ -737,7 +952,7 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, const struct rte_flow_item *item __rte_unused, uint64_t item_flags) { - struct mlx5_flow_verbs *verbs = &dev_flow->verbs; + struct mlx5_flow_verbs_workspace *verbs = &dev_flow->verbs; #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel tunnel = { @@ -767,11 +982,11 @@ flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow, } #endif if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4) - flow_verbs_item_gre_ip_protocol_update(verbs->attr, + flow_verbs_item_gre_ip_protocol_update(&verbs->attr, IBV_FLOW_SPEC_IPV4_EXT, IPPROTO_GRE); else - flow_verbs_item_gre_ip_protocol_update(verbs->attr, + flow_verbs_item_gre_ip_protocol_update(&verbs->attr, IBV_FLOW_SPEC_IPV6, IPPROTO_GRE); flow_verbs_spec_add(verbs, &tunnel, size); @@ -844,21 +1059,19 @@ flow_verbs_translate_action_drop * the input is valid and that there is space to insert the requested action * into the flow. 
* - * @param[in] dev_flow - * Pointer to mlx5_flow. + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. * @param[in] action * Action configuration. */ static void -flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow, +flow_verbs_translate_action_queue(struct mlx5_flow_rss_desc *rss_desc, const struct rte_flow_action *action) { const struct rte_flow_action_queue *queue = action->conf; - struct rte_flow *flow = dev_flow->flow; - if (flow->queue) - (*flow->queue)[0] = queue->index; - flow->rss.queue_num = 1; + rss_desc->queue[0] = queue->index; + rss_desc->queue_num = 1; } /** @@ -866,31 +1079,27 @@ flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow, * the input is valid and that there is space to insert the requested action * into the flow. * + * @param[in] rss_desc + * Pointer to mlx5_flow_rss_desc. * @param[in] action * Action configuration. - * @param[in, out] action_flags - * Pointer to the detected actions. - * @param[in] dev_flow - * Pointer to mlx5_flow. */ static void -flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow, +flow_verbs_translate_action_rss(struct mlx5_flow_rss_desc *rss_desc, const struct rte_flow_action *action) { const struct rte_flow_action_rss *rss = action->conf; const uint8_t *rss_key; - struct rte_flow *flow = dev_flow->flow; - if (flow->queue) - memcpy((*flow->queue), rss->queue, - rss->queue_num * sizeof(uint16_t)); - flow->rss.queue_num = rss->queue_num; + memcpy(rss_desc->queue, rss->queue, rss->queue_num * sizeof(uint16_t)); + rss_desc->queue_num = rss->queue_num; /* NULL RSS key indicates default RSS key. */ rss_key = !rss->key ? rss_hash_default_key : rss->key; - memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN); - /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */ - flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types; - flow->rss.level = rss->level; + memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN); + /* + * rss->level and rss.types should be set in advance when expanding + * items for RSS. + */ } /** @@ -970,6 +1179,8 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, struct rte_flow *flow = dev_flow->flow; #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) + struct mlx5_flow_counter_pool *pool; + struct mlx5_flow_counter *cnt = NULL; unsigned int size = sizeof(struct ibv_flow_spec_counter_action); struct ibv_flow_spec_counter_action counter = { .type = IBV_FLOW_SPEC_ACTION_COUNT, @@ -988,10 +1199,13 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, " context."); } #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) - counter.counter_set_handle = flow->counter->cs->handle; + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + counter.counter_set_handle = + ((struct ibv_counter_set *)cnt->dcs_when_active)->handle; flow_verbs_spec_add(&dev_flow->verbs, &counter, size); #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) - counter.counters = flow->counter->cs; + cnt = flow_verbs_counter_get_by_idx(dev, flow->counter, &pool); + counter.counters = (struct ibv_counters *)cnt->dcs_when_active; flow_verbs_spec_add(&dev_flow->verbs, &counter, size); #endif return 0; @@ -1008,6 +1222,10 @@ flow_verbs_translate_action_count(struct mlx5_flow *dev_flow, * Pointer to the list of items. * @param[in] actions * Pointer to the list of actions. + * @param[in] external + * This flow rule is created by request external to PMD. + * @param[in] hairpin + * Number of hairpin TX actions, 0 means classic flow. 
* @param[out] error * Pointer to the error structure. * @@ -1019,6 +1237,8 @@ flow_verbs_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, const struct rte_flow_item items[], const struct rte_flow_action actions[], + bool external __rte_unused, + int hairpin __rte_unused, struct rte_flow_error *error) { int ret; @@ -1026,6 +1246,7 @@ flow_verbs_validate(struct rte_eth_dev *dev, uint64_t item_flags = 0; uint64_t last_item = 0; uint8_t next_protocol = 0xff; + uint16_t ether_type = 0; if (items == NULL) return -1; @@ -1041,25 +1262,50 @@ flow_verbs_validate(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_ETH: ret = mlx5_flow_validate_item_eth(items, item_flags, - error); + false, error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 : MLX5_FLOW_LAYER_OUTER_L2; + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_eth *) + items->spec)->type; + ether_type &= + ((const struct rte_flow_item_eth *) + items->mask)->type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } break; case RTE_FLOW_ITEM_TYPE_VLAN: ret = mlx5_flow_validate_item_vlan(items, item_flags, - error); + dev, error); if (ret < 0) return ret; last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_VLAN) : (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_VLAN); + if (items->mask != NULL && items->spec != NULL) { + ether_type = + ((const struct rte_flow_item_vlan *) + items->spec)->inner_type; + ether_type &= + ((const struct rte_flow_item_vlan *) + items->mask)->inner_type; + ether_type = rte_be_to_cpu_16(ether_type); + } else { + ether_type = 0; + } break; case RTE_FLOW_ITEM_TYPE_IPV4: - ret = mlx5_flow_validate_item_ipv4(items, item_flags, - NULL, error); + ret = mlx5_flow_validate_item_ipv4 + (items, item_flags, + last_item, ether_type, NULL, + MLX5_ITEM_RANGE_NOT_ACCEPTED, + error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -1080,7 +1326,9 @@ flow_verbs_validate(struct rte_eth_dev *dev, break; case RTE_FLOW_ITEM_TYPE_IPV6: ret = mlx5_flow_validate_item_ipv6(items, item_flags, - NULL, error); + last_item, + ether_type, NULL, + error); if (ret < 0) return ret; last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : @@ -1149,6 +1397,12 @@ flow_verbs_validate(struct rte_eth_dev *dev, return ret; last_item = MLX5_FLOW_LAYER_MPLS; break; + case RTE_FLOW_ITEM_TYPE_ICMP: + case RTE_FLOW_ITEM_TYPE_ICMP6: + return rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + NULL, "ICMP/ICMP6 " + "item not supported"); default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, @@ -1216,6 +1470,18 @@ flow_verbs_validate(struct rte_eth_dev *dev, "action not supported"); } } + /* + * Validate the drop action mutual exclusion with other actions. + * Drop action is mutually-exclusive with any other action, except for + * Count action. + */ + if ((action_flags & MLX5_FLOW_ACTION_DROP) && + (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT))) + return rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ACTION, NULL, + "Drop action is mutually-exclusive " + "with any other action, except for " + "Count action"); if (!(action_flags & MLX5_FLOW_FATE_ACTIONS)) return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -1335,6 +1601,8 @@ flow_verbs_get_items_size(const struct rte_flow_item items[]) * The required size is calculate based on the actions and items. 
This function * also returns the detected actions and items for later use. * + * @param[in] dev + * Pointer to Ethernet device. * @param[in] attr * Pointer to the flow attributes. * @param[in] items @@ -1349,27 +1617,54 @@ flow_verbs_get_items_size(const struct rte_flow_item items[]) * is set. */ static struct mlx5_flow * -flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused, +flow_verbs_prepare(struct rte_eth_dev *dev, + const struct rte_flow_attr *attr __rte_unused, const struct rte_flow_item items[], const struct rte_flow_action actions[], struct rte_flow_error *error) { - uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr); - struct mlx5_flow *flow; + size_t size = 0; + uint32_t handle_idx = 0; + struct mlx5_flow *dev_flow; + struct mlx5_flow_handle *dev_handle; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + MLX5_ASSERT(wks); size += flow_verbs_get_actions_size(actions); size += flow_verbs_get_items_size(items); - flow = rte_calloc(__func__, 1, size, 0); - if (!flow) { + if (size > MLX5_VERBS_MAX_SPEC_ACT_SIZE) { + rte_flow_error_set(error, E2BIG, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "Verbs spec/action size too large"); + return NULL; + } + /* In case of corrupting the memory. */ + if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) { + rte_flow_error_set(error, ENOSPC, + RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, + "not free temporary device flow"); + return NULL; + } + dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + &handle_idx); + if (!dev_handle) { rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "not enough memory to create flow"); + "not enough memory to create flow handle"); return NULL; } - flow->verbs.attr = (void *)(flow + 1); - flow->verbs.specs = - (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr); - return flow; + MLX5_ASSERT(wks->flow_idx + 1 < RTE_DIM(wks->flows)); + dev_flow = &wks->flows[wks->flow_idx++]; + dev_flow->handle = dev_handle; + dev_flow->handle_idx = handle_idx; + /* Memcpy is used, only size needs to be cleared to 0. */ + dev_flow->verbs.size = 0; + dev_flow->verbs.attr.num_of_specs = 0; + dev_flow->ingress = attr->ingress; + dev_flow->hash_fields = 0; + /* Need to set transfer attribute: not supported in Verbs mode. 
*/ + return dev_flow; } /** @@ -1399,13 +1694,16 @@ flow_verbs_translate(struct rte_eth_dev *dev, const struct rte_flow_action actions[], struct rte_flow_error *error) { - struct rte_flow *flow = dev_flow->flow; uint64_t item_flags = 0; uint64_t action_flags = 0; uint64_t priority = attr->priority; uint32_t subpriority = 0; struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + struct mlx5_flow_rss_desc *rss_desc; + MLX5_ASSERT(wks); + rss_desc = &wks->rss_desc; if (priority == MLX5_FLOW_PRIO_RSVD) priority = priv->config.flow_prio - 1; for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { @@ -1417,22 +1715,27 @@ flow_verbs_translate(struct rte_eth_dev *dev, case RTE_FLOW_ACTION_TYPE_FLAG: flow_verbs_translate_action_flag(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_FLAG; + dev_flow->handle->mark = 1; break; case RTE_FLOW_ACTION_TYPE_MARK: flow_verbs_translate_action_mark(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_MARK; + dev_flow->handle->mark = 1; break; case RTE_FLOW_ACTION_TYPE_DROP: flow_verbs_translate_action_drop(dev_flow, actions); action_flags |= MLX5_FLOW_ACTION_DROP; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP; break; case RTE_FLOW_ACTION_TYPE_QUEUE: - flow_verbs_translate_action_queue(dev_flow, actions); + flow_verbs_translate_action_queue(rss_desc, actions); action_flags |= MLX5_FLOW_ACTION_QUEUE; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; case RTE_FLOW_ACTION_TYPE_RSS: - flow_verbs_translate_action_rss(dev_flow, actions); + flow_verbs_translate_action_rss(rss_desc, actions); action_flags |= MLX5_FLOW_ACTION_RSS; + dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE; break; case RTE_FLOW_ACTION_TYPE_COUNT: ret = flow_verbs_translate_action_count(dev_flow, @@ -1449,7 +1752,7 @@ flow_verbs_translate(struct rte_eth_dev *dev, "action not supported"); } } - flow->actions = action_flags; + dev_flow->act_flags = action_flags; for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) { int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL); @@ -1476,9 +1779,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_ipv4(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L3; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, + (rss_desc, tunnel, MLX5_IPV4_LAYER_TYPES, MLX5_IPV4_IBV_RX_HASH); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 : @@ -1488,9 +1791,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_ipv6(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L3; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, + (rss_desc, tunnel, MLX5_IPV6_LAYER_TYPES, MLX5_IPV6_IBV_RX_HASH); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 : @@ -1500,9 +1803,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_tcp(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L4; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, ETH_RSS_TCP, + (rss_desc, tunnel, ETH_RSS_TCP, (IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)); item_flags |= tunnel ? 
MLX5_FLOW_LAYER_INNER_L4_TCP : @@ -1512,9 +1815,9 @@ flow_verbs_translate(struct rte_eth_dev *dev, flow_verbs_translate_item_udp(dev_flow, items, item_flags); subpriority = MLX5_PRIORITY_MAP_L4; - dev_flow->verbs.hash_fields |= + dev_flow->hash_fields |= mlx5_flow_hashfields_adjust - (dev_flow, tunnel, ETH_RSS_UDP, + (rss_desc, tunnel, ETH_RSS_UDP, (IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)); item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP : @@ -1523,38 +1826,38 @@ flow_verbs_translate(struct rte_eth_dev *dev, case RTE_FLOW_ITEM_TYPE_VXLAN: flow_verbs_translate_item_vxlan(dev_flow, items, item_flags); - subpriority = MLX5_PRIORITY_MAP_L2; + subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc); item_flags |= MLX5_FLOW_LAYER_VXLAN; break; case RTE_FLOW_ITEM_TYPE_VXLAN_GPE: flow_verbs_translate_item_vxlan_gpe(dev_flow, items, item_flags); - subpriority = MLX5_PRIORITY_MAP_L2; + subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc); item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE; break; case RTE_FLOW_ITEM_TYPE_GRE: flow_verbs_translate_item_gre(dev_flow, items, item_flags); - subpriority = MLX5_PRIORITY_MAP_L2; + subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc); item_flags |= MLX5_FLOW_LAYER_GRE; break; case RTE_FLOW_ITEM_TYPE_MPLS: flow_verbs_translate_item_mpls(dev_flow, items, item_flags); - subpriority = MLX5_PRIORITY_MAP_L2; + subpriority = MLX5_TUNNEL_PRIO_GET(rss_desc); item_flags |= MLX5_FLOW_LAYER_MPLS; break; default: return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, - NULL, - "item not supported"); + NULL, "item not supported"); } } - dev_flow->layers = item_flags; - dev_flow->verbs.attr->priority = + dev_flow->handle->layers = item_flags; + /* Other members of attr will be ignored. */ + dev_flow->verbs.attr.priority = mlx5_flow_adjust_priority(dev, priority, subpriority); - dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port; + dev_flow->verbs.attr.port = (uint8_t)priv->dev_port; return 0; } @@ -1569,24 +1872,26 @@ flow_verbs_translate(struct rte_eth_dev *dev, static void flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow_verbs *verbs; - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; + uint32_t handle_idx; if (!flow) return; - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - verbs = &dev_flow->verbs; - if (verbs->flow) { - claim_zero(mlx5_glue->destroy_flow(verbs->flow)); - verbs->flow = NULL; + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + handle_idx, handle, next) { + if (handle->drv_flow) { + claim_zero(mlx5_glue->destroy_flow(handle->drv_flow)); + handle->drv_flow = NULL; } - if (verbs->hrxq) { - if (flow->actions & MLX5_FLOW_ACTION_DROP) - mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, verbs->hrxq); - verbs->hrxq = NULL; + /* hrxq is union, don't touch it only the flag is set. 
*/ + if (handle->rix_hrxq && + handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; } + if (handle->vf_vlan.tag && handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); } } @@ -1601,19 +1906,26 @@ flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow) static void flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow) { - struct mlx5_flow *dev_flow; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; if (!flow) return; flow_verbs_remove(dev, flow); - while (!LIST_EMPTY(&flow->dev_flows)) { - dev_flow = LIST_FIRST(&flow->dev_flows); - LIST_REMOVE(dev_flow, next); - rte_free(dev_flow); + while (flow->dev_handles) { + uint32_t tmp_idx = flow->dev_handles; + + handle = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); + if (!handle) + return; + flow->dev_handles = handle->next.next; + mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], + tmp_idx); } if (flow->counter) { - flow_verbs_counter_release(flow->counter); - flow->counter = NULL; + flow_verbs_counter_release(dev, flow->counter); + flow->counter = 0; } } @@ -1634,37 +1946,35 @@ static int flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, struct rte_flow_error *error) { - struct mlx5_flow_verbs *verbs; + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_flow_handle *handle; struct mlx5_flow *dev_flow; + struct mlx5_hrxq *hrxq; + uint32_t dev_handles; int err; - - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - verbs = &dev_flow->verbs; - if (flow->actions & MLX5_FLOW_ACTION_DROP) { - verbs->hrxq = mlx5_hrxq_drop_new(dev); - if (!verbs->hrxq) { - rte_flow_error_set - (error, errno, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, - "cannot get drop hash queue"); - goto error; - } + int idx; + struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace(); + + MLX5_ASSERT(wks); + for (idx = wks->flow_idx - 1; idx >= 0; idx--) { + dev_flow = &wks->flows[idx]; + handle = dev_flow->handle; + if (handle->fate_action == MLX5_FLOW_FATE_DROP) { + MLX5_ASSERT(priv->drop_queue.hrxq); + hrxq = priv->drop_queue.hrxq; } else { - struct mlx5_hrxq *hrxq; - - hrxq = mlx5_hrxq_get(dev, flow->key, - MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), - flow->rss.queue_num); - if (!hrxq) - hrxq = mlx5_hrxq_new(dev, flow->key, - MLX5_RSS_HASH_KEY_LEN, - verbs->hash_fields, - (*flow->queue), - flow->rss.queue_num, - !!(dev_flow->layers & - MLX5_FLOW_LAYER_TUNNEL)); + uint32_t hrxq_idx; + struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc; + + MLX5_ASSERT(rss_desc->queue_num); + rss_desc->key_len = MLX5_RSS_HASH_KEY_LEN; + rss_desc->hash_fields = dev_flow->hash_fields; + rss_desc->tunnel = !!(handle->layers & + MLX5_FLOW_LAYER_TUNNEL); + rss_desc->shared_rss = 0; + hrxq_idx = mlx5_hrxq_get(dev, rss_desc); + hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ], + hrxq_idx); if (!hrxq) { rte_flow_error_set (error, rte_errno, @@ -1672,30 +1982,42 @@ flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow, "cannot get hash queue"); goto error; } - verbs->hrxq = hrxq; + handle->rix_hrxq = hrxq_idx; } - verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp, - verbs->attr); - if (!verbs->flow) { + MLX5_ASSERT(hrxq); + handle->drv_flow = mlx5_glue->create_flow + (hrxq->qp, &dev_flow->verbs.attr); + if (!handle->drv_flow) { rte_flow_error_set(error, errno, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "hardware refuses to create flow"); goto error; } + if 
(priv->vmwa_context && + handle->vf_vlan.tag && !handle->vf_vlan.created) { + /* + * The rule contains the VLAN pattern. + * For VF we are going to create VLAN + * interface to make hypervisor set correct + * e-Switch vport context. + */ + mlx5_vlan_vmwa_acquire(dev, &handle->vf_vlan); + } } return 0; error: err = rte_errno; /* Save rte_errno before cleanup. */ - LIST_FOREACH(dev_flow, &flow->dev_flows, next) { - verbs = &dev_flow->verbs; - if (verbs->hrxq) { - if (flow->actions & MLX5_FLOW_ACTION_DROP) - mlx5_hrxq_drop_release(dev); - else - mlx5_hrxq_release(dev, verbs->hrxq); - verbs->hrxq = NULL; + SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles, + dev_handles, handle, next) { + /* hrxq is union, don't touch it only the flag is set. */ + if (handle->rix_hrxq && + handle->fate_action == MLX5_FLOW_FATE_QUEUE) { + mlx5_hrxq_release(dev, handle->rix_hrxq); + handle->rix_hrxq = 0; } + if (handle->vf_vlan.tag && handle->vf_vlan.created) + mlx5_vlan_vmwa_release(dev, &handle->vf_vlan); } rte_errno = err; /* Restore rte_errno. */ return -rte_errno; @@ -1733,6 +2055,17 @@ flow_verbs_query(struct rte_eth_dev *dev, return ret; } +static int +flow_verbs_sync_domain(struct rte_eth_dev *dev, uint32_t domains, + uint32_t flags) +{ + RTE_SET_USED(dev); + RTE_SET_USED(domains); + RTE_SET_USED(flags); + + return 0; +} + const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = { .validate = flow_verbs_validate, .prepare = flow_verbs_prepare, @@ -1741,4 +2074,5 @@ const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = { .remove = flow_verbs_remove, .destroy = flow_verbs_destroy, .query = flow_verbs_query, + .sync_domain = flow_verbs_sync_domain, };
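
The hunks above move the Verbs-to-flow priority mapping into this file via the two `priority_map_3`/`priority_map_5` tables and `mlx5_flow_adjust_priority()`. As a minimal standalone sketch of how those tables translate a (rule priority, item sub-priority) pair into a Verbs priority, the following compiles outside of DPDK; `MLX5_PRIORITY_MAP_MAX`, the meaning of sub-priority value 2 as "L4", and the hard-coded `flow_prio` argument are assumptions made only so the example builds on its own, not part of the patch.

```c
/*
 * Minimal, self-contained sketch of the priority mapping used by
 * mlx5_flow_adjust_priority() in the hunks above.  The two tables are
 * copied from the patch; MLX5_PRIORITY_MAP_MAX (3 sub-priorities,
 * assumed to mean L2/L3/L4) and the flow_prio value passed from main()
 * are assumptions made only so this example builds on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define MLX5_PRIORITY_MAP_MAX 3
#define RTE_DIM(a) (sizeof(a) / sizeof((a)[0]))

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
	{ 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
	{ 9, 10, 11 }, { 12, 13, 14 },
};

/* Same table selection as mlx5_flow_adjust_priority(), minus the device. */
static uint32_t
adjust_priority(uint32_t flow_prio, int32_t priority, uint32_t subpriority)
{
	uint32_t res = 0;

	switch (flow_prio) {
	case RTE_DIM(priority_map_3):
		res = priority_map_3[priority][subpriority];
		break;
	case RTE_DIM(priority_map_5):
		res = priority_map_5[priority][subpriority];
		break;
	}
	return res;
}

int
main(void)
{
	/* A rule at base priority 1 matching up to L4 on a 16-priority HCA. */
	printf("verbs priority = %u\n", adjust_priority(5, 1, 2));
	return 0;
}
```

In the driver itself, `flow_prio` comes from `mlx5_flow_discover_priorities()` (8 or 16 supported Verbs priorities mapped to 3 or 5 flow priority levels), and the sub-priority is picked per item layer during translation, e.g. `MLX5_PRIORITY_MAP_L3` for IPv4/IPv6 and `MLX5_PRIORITY_MAP_L4` for TCP/UDP as seen in `flow_verbs_translate()` above.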