diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index a49e62794e..e8f409fd07 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -2,6 +2,7 @@
  * Copyright 2018 Mellanox Technologies, Ltd
  */
 
+#include
 #include
 #include
@@ -34,6 +35,67 @@
 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
 
+/**
+ * Validate META item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] attr
+ *   Attributes of flow that includes this item.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_meta(struct rte_eth_dev *dev,
+			   const struct rte_flow_item *item,
+			   const struct rte_flow_attr *attr,
+			   struct rte_flow_error *error)
+{
+	const struct rte_flow_item_meta *spec = item->spec;
+	const struct rte_flow_item_meta *mask = item->mask;
+	const struct rte_flow_item_meta nic_mask = {
+		.data = RTE_BE32(UINT32_MAX)
+	};
+	int ret;
+	uint64_t offloads = dev->data->dev_conf.txmode.offloads;
+
+	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
+		return rte_flow_error_set(error, EPERM,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  NULL,
+					  "match on metadata offload "
+					  "configuration is off for this port");
+	if (!spec)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+					  item->spec,
+					  "data cannot be empty");
+	if (!spec->data)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+					  NULL,
+					  "data cannot be zero");
+	if (!mask)
+		mask = &rte_flow_item_meta_mask;
+	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
+					(const uint8_t *)&nic_mask,
+					sizeof(struct rte_flow_item_meta),
+					error);
+	if (ret < 0)
+		return ret;
+	if (attr->ingress)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+					  NULL,
+					  "pattern not supported for ingress");
+	return 0;
+}
+
 /**
  * Verify the @p attributes will be correctly understood by the NIC and store
  * them in the @p flow if everything is correct.
@@ -67,21 +129,16 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 					  NULL,
 					  "priority out of range");
-	if (attributes->egress)
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
-					  NULL,
-					  "egress is not supported");
 	if (attributes->transfer)
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
 					  NULL,
 					  "transfer is not supported");
-	if (!attributes->ingress)
-		return rte_flow_error_set(error, EINVAL,
-					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
-					  NULL,
-					  "ingress attribute is mandatory");
+	if (!(attributes->egress ^ attributes->ingress))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ATTR, NULL,
+					  "must specify exactly one of "
+					  "ingress or egress");
 	return 0;
 }
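
For context, a minimal caller-side sketch (not part of the patch) of the
egress metadata match the relaxed attribute check above enables.  It assumes
a port started with DEV_TX_OFFLOAD_MATCH_METADATA in txmode.offloads; the
port id and the 0xcafe value are arbitrary.  Note the empty action list:
together with the fate-action relaxation further down, an egress rule
carries no fate action.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
validate_egress_meta_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .egress = 1 }; /* exactly one direction */
	/* 0xcafe is an arbitrary example value; data must be non-zero. */
	struct rte_flow_item_meta spec = { .data = rte_cpu_to_be_32(0xcafe) };
	struct rte_flow_item_meta mask = { .data = RTE_BE32(UINT32_MAX) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_META,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_END }, /* no fate on egress */
	};
	struct rte_flow_error error;

	return rte_flow_validate(port_id, &attr, pattern, actions, &error);
}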
@@ -178,8 +235,11 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					       MLX5_FLOW_LAYER_OUTER_L4_UDP;
 			break;
 		case RTE_FLOW_ITEM_TYPE_TCP:
-			ret = mlx5_flow_validate_item_tcp(items, item_flags,
-							  next_protocol, error);
+			ret = mlx5_flow_validate_item_tcp
+						(items, item_flags,
+						 next_protocol,
+						 &rte_flow_item_tcp_mask,
+						 error);
 			if (ret < 0)
 				return ret;
 			item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
 					       MLX5_FLOW_LAYER_OUTER_L4_TCP;
 			break;
@@ -215,6 +275,13 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			item_flags |= MLX5_FLOW_LAYER_MPLS;
 			break;
+		case RTE_FLOW_ITEM_TYPE_META:
+			ret = flow_dv_validate_item_meta(dev, items, attr,
+							 error);
+			if (ret < 0)
+				return ret;
+			item_flags |= MLX5_FLOW_ITEM_METADATA;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -232,7 +299,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			break;
 		case RTE_FLOW_ACTION_TYPE_FLAG:
 			ret = mlx5_flow_validate_action_flag(action_flags,
-							     error);
+							     attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_FLAG;
@@ -241,7 +308,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_MARK:
 			ret = mlx5_flow_validate_action_mark(actions,
 							     action_flags,
-							     error);
+							     attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_MARK;
@@ -249,7 +316,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			break;
 		case RTE_FLOW_ACTION_TYPE_DROP:
 			ret = mlx5_flow_validate_action_drop(action_flags,
-							     error);
+							     attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_DROP;
@@ -258,7 +325,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			ret = mlx5_flow_validate_action_queue(actions,
 							      action_flags, dev,
-							      error);
+							      attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_QUEUE;
@@ -267,14 +334,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			ret = mlx5_flow_validate_action_rss(actions,
 							    action_flags, dev,
-							    error);
+							    attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_RSS;
 			++actions_n;
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = mlx5_flow_validate_action_count(dev, error);
+			ret = mlx5_flow_validate_action_count(dev, attr, error);
 			if (ret < 0)
 				return ret;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
@@ -287,7 +354,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 						  "action not supported");
 		}
 	}
-	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
+	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
 					  actions,
 					  "no fate action is found");
@@ -781,6 +848,8 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
 	const struct rte_flow_item_nvgre *nvgre_v = item->spec;
 	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
 	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+	const char *tni_flow_id_m = (const char *)nvgre_m->tni;
+	const char *tni_flow_id_v = (const char *)nvgre_v->tni;
 	char *gre_key_m;
 	char *gre_key_v;
 	int size;
@@ -793,9 +862,9 @@ flow_dv_translate_item_nvgre(void *matcher, void *key,
 	size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
 	gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
 	gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
-	memcpy(gre_key_m, nvgre_m->tni, size);
+	memcpy(gre_key_m, tni_flow_id_m, size);
 	for (i = 0; i < size; ++i)
-		gre_key_v[i] = gre_key_m[i] & ((const char *)(nvgre_v->tni))[i];
+		gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
 	flow_dv_translate_item_gre(matcher, key, item, inner);
 }
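
An aside on the convention visible in the loop above (illustration only, not
driver code): every value byte written into a DV matcher is stored
pre-masked, so value == (value & mask) holds for each field.  A generic
restatement:

#include <stddef.h>
#include <stdint.h>

/* Pre-mask a match value in place, byte by byte, exactly as the
 * gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i] loop above does. */
static void
dv_premask_value(uint8_t *value, const uint8_t *mask, size_t size)
{
	size_t i;

	for (i = 0; i < size; ++i)
		value[i] &= mask[i];
}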
@@ -855,6 +924,41 @@ flow_dv_translate_item_vxlan(void *matcher, void *key,
 		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
 }
 
+/**
+ * Add META item to matcher.
+ *
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_meta(void *matcher, void *key,
+			    const struct rte_flow_item *item)
+{
+	const struct rte_flow_item_meta *meta_m;
+	const struct rte_flow_item_meta *meta_v;
+	void *misc2_m =
+		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+	void *misc2_v =
+		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+
+	meta_m = (const void *)item->mask;
+	if (!meta_m)
+		meta_m = &rte_flow_item_meta_mask;
+	meta_v = (const void *)item->spec;
+	if (meta_v) {
+		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
+			 rte_be_to_cpu_32(meta_m->data));
+		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
+			 rte_be_to_cpu_32(meta_v->data & meta_m->data));
+	}
+}
+
 /**
  * Update the matcher and the value based on the selected item.
  *
@@ -941,9 +1045,75 @@ flow_dv_create_item(void *matcher, void *key,
 		flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
 					     inner);
 		break;
+	case RTE_FLOW_ITEM_TYPE_META:
+		flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
+		break;
 	default:
 		break;
 	}
 }
+
+/**
+ * Store the requested actions in an array.
+ *
+ * @param[in] action
+ *   Flow action to translate.
+ * @param[in, out] dev_flow
+ *   Pointer to the mlx5_flow.
+ */
+static void
+flow_dv_create_action(const struct rte_flow_action *action,
+		      struct mlx5_flow *dev_flow)
+{
+	const struct rte_flow_action_queue *queue;
+	const struct rte_flow_action_rss *rss;
+	int actions_n = dev_flow->dv.actions_n;
+	struct rte_flow *flow = dev_flow->flow;
+
+	switch (action->type) {
+	case RTE_FLOW_ACTION_TYPE_VOID:
+		break;
+	case RTE_FLOW_ACTION_TYPE_FLAG:
+		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
+		dev_flow->dv.actions[actions_n].tag_value =
+			MLX5_FLOW_MARK_DEFAULT;
+		actions_n++;
+		flow->actions |= MLX5_FLOW_ACTION_FLAG;
+		break;
+	case RTE_FLOW_ACTION_TYPE_MARK:
+		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
+		dev_flow->dv.actions[actions_n].tag_value =
+			((const struct rte_flow_action_mark *)
+			 (action->conf))->id;
+		flow->actions |= MLX5_FLOW_ACTION_MARK;
+		actions_n++;
+		break;
+	case RTE_FLOW_ACTION_TYPE_DROP:
+		dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
+		flow->actions |= MLX5_FLOW_ACTION_DROP;
+		break;
+	case RTE_FLOW_ACTION_TYPE_QUEUE:
+		queue = action->conf;
+		flow->rss.queue_num = 1;
+		(*flow->queue)[0] = queue->index;
+		flow->actions |= MLX5_FLOW_ACTION_QUEUE;
+		break;
+	case RTE_FLOW_ACTION_TYPE_RSS:
+		rss = action->conf;
+		if (flow->queue)
+			memcpy((*flow->queue), rss->queue,
+			       rss->queue_num * sizeof(uint16_t));
+		flow->rss.queue_num = rss->queue_num;
+		memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
+		flow->rss.types = rss->types;
+		flow->rss.level = rss->level;
+		/* Added to array only in apply since we need the QP. */
+		flow->actions |= MLX5_FLOW_ACTION_RSS;
+		break;
+	default:
+		break;
+	}
+	dev_flow->dv.actions_n = actions_n;
+}
 
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
@@ -1042,7 +1212,7 @@ flow_dv_matcher_register(struct rte_eth_dev *dev,
 	if (matcher->egress)
 		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
 	cache_matcher->matcher_object =
-		mlx5dv_create_flow_matcher(priv->ctx, &dv_attr);
+		mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
 	if (!cache_matcher->matcher_object)
 		return rte_flow_error_set(error, ENOMEM,
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
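
Worth noting (not part of the patch): flow_dv_matcher_register() keys its
cache on the match mask, so rules that differ only in match values share a
single mlx5dv matcher object and the second registration merely takes a
reference.  A caller-side sketch; the port id, metadata values, and the
helper name are hypothetical.

#include <stdint.h>
#include <rte_byteorder.h>
#include <rte_flow.h>

static int
create_two_meta_rules(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .egress = 1 };
	struct rte_flow_item_meta mask = { .data = RTE_BE32(UINT32_MAX) };
	struct rte_flow_item_meta spec = { .data = rte_cpu_to_be_32(1) };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_META,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (!rte_flow_create(port_id, &attr, pattern, actions, err))
		return -1;
	/* Same mask, new value: the cached matcher is reused (refcnt++). */
	spec.data = rte_cpu_to_be_32(2);
	return rte_flow_create(port_id, &attr, pattern, actions, err) ? 0 : -1;
}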
@@ -1109,26 +1279,222 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	matcher.egress = attr->egress;
 	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
 		return -rte_errno;
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
+		flow_dv_create_action(actions, dev_flow);
 	return 0;
 }
 
 /**
- * Fills the flow_ops with the function pointers.
+ * Apply the flow to the NIC.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ * @param[out] error
+ *   Pointer to error structure.
  *
- * @param[out] flow_ops
- *   Pointer to driver_ops structure.
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
-void
-mlx5_flow_dv_get_driver_ops(struct mlx5_flow_driver_ops *flow_ops)
+static int
+flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
+	      struct rte_flow_error *error)
 {
-	*flow_ops = (struct mlx5_flow_driver_ops) {
-		.validate = flow_dv_validate,
-		.prepare = flow_dv_prepare,
-		.translate = flow_dv_translate,
-		.apply = NULL,
-		.remove = NULL,
-		.destroy = NULL,
-	};
+	struct mlx5_flow_dv *dv;
+	struct mlx5_flow *dev_flow;
+	int n;
+	int err;
+
+	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		dv = &dev_flow->dv;
+		n = dv->actions_n;
+		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+			dv->hrxq = mlx5_hrxq_drop_new(dev);
+			if (!dv->hrxq) {
+				rte_flow_error_set
+					(error, errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					 "cannot get drop hash queue");
+				goto error;
+			}
+			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+			dv->actions[n].qp = dv->hrxq->qp;
+			n++;
+		} else if (flow->actions &
+			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
+			struct mlx5_hrxq *hrxq;
+
+			hrxq = mlx5_hrxq_get(dev, flow->key,
+					     MLX5_RSS_HASH_KEY_LEN,
+					     dv->hash_fields,
+					     (*flow->queue),
+					     flow->rss.queue_num);
+			if (!hrxq)
+				hrxq = mlx5_hrxq_new
+					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
+					 dv->hash_fields, (*flow->queue),
+					 flow->rss.queue_num,
+					 !!(flow->layers &
+					    MLX5_FLOW_LAYER_TUNNEL));
+			if (!hrxq) {
+				rte_flow_error_set
+					(error, rte_errno,
+					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					 "cannot get hash queue");
+				goto error;
+			}
+			dv->hrxq = hrxq;
+			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
+			dv->actions[n].qp = hrxq->qp;
+			n++;
+		}
+		dv->flow =
+			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
+						  (void *)&dv->value, n,
+						  dv->actions);
+		if (!dv->flow) {
+			rte_flow_error_set(error, errno,
+					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					   NULL,
+					   "hardware refuses to create flow");
+			goto error;
+		}
+	}
+	return 0;
+error:
+	err = rte_errno; /* Save rte_errno before cleanup. */
+	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		struct mlx5_flow_dv *dv = &dev_flow->dv;
+
+		if (dv->hrxq) {
+			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+				mlx5_hrxq_drop_release(dev);
+			else
+				mlx5_hrxq_release(dev, dv->hrxq);
+			dv->hrxq = NULL;
+		}
+	}
+	rte_errno = err; /* Restore rte_errno. */
+	return -rte_errno;
 }
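
For completeness (not part of the patch), a caller-side ingress RSS rule
whose queue list and key end up in flow->rss and feed the mlx5_hrxq_get() and
mlx5_hrxq_new() pair above.  It assumes at least four configured Rx queues;
the queue ids and the toy 40-byte key (MLX5_RSS_HASH_KEY_LEN) are arbitrary.

#include <stdint.h>
#include <string.h>
#include <rte_ethdev.h>
#include <rte_flow.h>

static struct rte_flow *
create_rss_rule(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	static uint8_t key[40]; /* 40 == MLX5_RSS_HASH_KEY_LEN */
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.key_len = sizeof(key),
		.key = key,
		.queue_num = 4,
		.queue = queues,
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	memset(key, 0x6d, sizeof(key)); /* arbitrary toy key */
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}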
+
+/**
+ * Release the flow matcher.
+ *
+ * @param dev
+ *   Pointer to Ethernet device.
+ * @param flow
+ *   Pointer to mlx5_flow.
+ *
+ * @return
+ *   1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_matcher_release(struct rte_eth_dev *dev,
+			struct mlx5_flow *flow)
+{
+	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+
+	assert(matcher->matcher_object);
+	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
+		dev->data->port_id, (void *)matcher,
+		rte_atomic32_read(&matcher->refcnt));
+	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+		claim_zero(mlx5_glue->dv_destroy_flow_matcher
+			   (matcher->matcher_object));
+		LIST_REMOVE(matcher, next);
+		rte_free(matcher);
+		DRV_LOG(DEBUG, "port %u matcher %p: removed",
+			dev->data->port_id, (void *)matcher);
+		return 0;
+	}
+	return 1;
+}
+
+/**
+ * Remove the flow from the NIC but keep it in memory.
+ *
+ * @param[in] dev
+ *   Pointer to Ethernet device.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ */
+static void
+flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	struct mlx5_flow_dv *dv;
+	struct mlx5_flow *dev_flow;
+
+	if (!flow)
+		return;
+	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
+		dv = &dev_flow->dv;
+		if (dv->flow) {
+			claim_zero(mlx5_glue->destroy_flow(dv->flow));
+			dv->flow = NULL;
+		}
+		if (dv->hrxq) {
+			if (flow->actions & MLX5_FLOW_ACTION_DROP)
+				mlx5_hrxq_drop_release(dev);
+			else
+				mlx5_hrxq_release(dev, dv->hrxq);
+			dv->hrxq = NULL;
+		}
+	}
+	if (flow->counter)
+		flow->counter = NULL;
+}
+
+/**
+ * Remove the flow from the NIC and the memory.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in, out] flow
+ *   Pointer to flow structure.
+ */
+static void
+flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
+{
+	struct mlx5_flow *dev_flow;
+
+	if (!flow)
+		return;
+	flow_dv_remove(dev, flow);
+	while (!LIST_EMPTY(&flow->dev_flows)) {
+		dev_flow = LIST_FIRST(&flow->dev_flows);
+		LIST_REMOVE(dev_flow, next);
+		if (dev_flow->dv.matcher)
+			flow_dv_matcher_release(dev, dev_flow);
+		rte_free(dev_flow);
+	}
+}
+
+/**
+ * Query a flow.
+ *
+ * @see rte_flow_query()
+ * @see rte_flow_ops
+ */
+static int
+flow_dv_query(struct rte_eth_dev *dev __rte_unused,
+	      struct rte_flow *flow __rte_unused,
+	      const struct rte_flow_action *actions __rte_unused,
+	      void *data __rte_unused,
+	      struct rte_flow_error *error __rte_unused)
+{
+	rte_errno = ENOTSUP;
+	return -rte_errno;
+}
+
+const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
+	.validate = flow_dv_validate,
+	.prepare = flow_dv_prepare,
+	.translate = flow_dv_translate,
+	.apply = flow_dv_apply,
+	.remove = flow_dv_remove,
+	.destroy = flow_dv_destroy,
+	.query = flow_dv_query,
+};
+
 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
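
Finally, a caller-side view (not part of the patch) of the MARK-to-TAG
translation above: the id programmed through MLX5DV_FLOW_ACTION_TAG comes
back on received packets in mbuf->hash.fdir.hi, with PKT_RX_FDIR_ID set in
ol_flags.  The port id, mark id, and queue index are arbitrary.

#include <stdint.h>
#include <rte_flow.h>
#include <rte_mbuf.h>

static struct rte_flow *
create_mark_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_mark mark = { .id = 0x2a }; /* arbitrary id */
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

/* On the Rx path, retrieve the mark (0 if the packet was not marked). */
static uint32_t
read_mark(const struct rte_mbuf *m)
{
	return (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
}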