diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 6bccdf5b16..70e8d0b113 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -268,6 +268,31 @@ struct field_modify_info modify_tcp[] = {
 	{0, 0, 0},
 };
 
+static const struct rte_flow_item *
+mlx5_flow_find_tunnel_item(const struct rte_flow_item *item)
+{
+	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		switch (item->type) {
+		default:
+			break;
+		case RTE_FLOW_ITEM_TYPE_VXLAN:
+		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+		case RTE_FLOW_ITEM_TYPE_GRE:
+		case RTE_FLOW_ITEM_TYPE_MPLS:
+		case RTE_FLOW_ITEM_TYPE_NVGRE:
+		case RTE_FLOW_ITEM_TYPE_GENEVE:
+			return item;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			if (item[1].type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+			    item[1].type == RTE_FLOW_ITEM_TYPE_IPV6)
+				return item;
+			break;
+		}
+	}
+	return NULL;
+}
+
 static void
 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
 			  uint8_t next_protocol, uint64_t *item_flags,
@@ -1366,7 +1391,7 @@ mlx5_flow_item_field_width(enum rte_flow_field_id field)
 	case RTE_FLOW_FIELD_TCP_ACK_NUM:
 		return 32;
 	case RTE_FLOW_FIELD_TCP_FLAGS:
-		return 6;
+		return 9;
 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
 	case RTE_FLOW_FIELD_UDP_PORT_DST:
 		return 16;
@@ -1688,10 +1713,10 @@ mlx5_flow_field_id_to_modify_info
 						(32 - width));
 		break;
 	case RTE_FLOW_FIELD_TCP_FLAGS:
-		info[idx] = (struct field_modify_info){1, 0,
+		info[idx] = (struct field_modify_info){2, 0,
 					MLX5_MODI_OUT_TCP_FLAGS};
 		if (mask)
-			mask[idx] = 0x3f >> (6 - width);
+			mask[idx] = rte_cpu_to_be_16(0x1ff >> (9 - width));
 		break;
 	case RTE_FLOW_FIELD_UDP_PORT_SRC:
 		info[idx] = (struct field_modify_info){2, 0,
@@ -2598,6 +2623,51 @@ flow_dv_validate_item_ipv6_frag_ext(const struct rte_flow_item *item,
 					  "specified range not supported");
 }
 
+/*
+ * Validate ASO CT item.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] item
+ *   Item specification.
+ * @param[in] item_flags
+ *   Pointer to bit-fields that hold the items detected until now.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev,
+			     const struct rte_flow_item *item,
+			     uint64_t *item_flags,
+			     struct rte_flow_error *error)
+{
+	const struct rte_flow_item_conntrack *spec = item->spec;
+	const struct rte_flow_item_conntrack *mask = item->mask;
+	RTE_SET_USED(dev);
+	uint32_t flags;
+
+	if (*item_flags & MLX5_FLOW_LAYER_ASO_CT)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "Only one CT is supported");
+	if (!mask)
+		mask = &rte_flow_item_conntrack_mask;
+	flags = spec->flags & mask->flags;
+	if ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID) &&
+	    ((flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID) ||
+	     (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD) ||
+	     (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ITEM, NULL,
+					  "Conflicting status bits");
+	/* State change also needs to be considered. */
+	*item_flags |= MLX5_FLOW_LAYER_ASO_CT;
+	return 0;
+}
+
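For context, a rule matching on the conntrack state validated above could look roughly like the following application-side sketch; the port, group and queue numbers and the helper name are illustrative assumptions, not values taken from this patch:

#include <rte_flow.h>

/* Hypothetical helper: steer packets whose CT state is valid to queue 0. */
static struct rte_flow *
ct_state_flow_sketch(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	const struct rte_flow_item_conntrack ct_spec = {
		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
	};
	const struct rte_flow_item_conntrack ct_mask = {
		/* A single "valid" bit: no conflict per the check above. */
		.flags = RTE_FLOW_CONNTRACK_PKT_STATE_VALID,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_CONNTRACK,
		  .spec = &ct_spec, .mask = &ct_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}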
 /**
  * Validate the pop VLAN action.
  *
@@ -3146,13 +3216,33 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
 	return 0;
 }
 
+/**
+ * Check if action counter is shared by either old or new mechanism.
+ *
+ * @param[in] action
+ *   Pointer to the action structure.
+ *
+ * @return
+ *   True when counter is shared, false otherwise.
+ */
+static inline bool
+is_shared_action_count(const struct rte_flow_action *action)
+{
+	const struct rte_flow_action_count *count =
+			(const struct rte_flow_action_count *)action->conf;
+
+	if ((int)action->type == MLX5_RTE_FLOW_ACTION_TYPE_COUNT)
+		return true;
+	return !!(count && count->shared);
+}
+
 /**
  * Validate count action.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in] action
- *   Pointer to the action structure.
+ * @param[in] shared
+ *   Indicator if action is shared.
  * @param[in] action_flags
  *   Holds the actions detected until now.
  * @param[out] error
@@ -3162,13 +3252,11 @@ flow_dv_validate_action_set_tag(struct rte_eth_dev *dev,
  *   0 on success, a negative errno value otherwise and rte_errno is set.
  */
 static int
-flow_dv_validate_action_count(struct rte_eth_dev *dev,
-			      const struct rte_flow_action *action,
+flow_dv_validate_action_count(struct rte_eth_dev *dev, bool shared,
 			      uint64_t action_flags,
 			      struct rte_flow_error *error)
 {
 	struct mlx5_priv *priv = dev->data->dev_private;
-	const struct rte_flow_action_count *count;
 
 	if (!priv->config.devx)
 		goto notsup_err;
@@ -3176,8 +3264,7 @@ flow_dv_validate_action_count(struct rte_eth_dev *dev,
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
 					  "duplicate count actions set");
-	count = (const struct rte_flow_action_count *)action->conf;
-	if (count && count->shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
+	if (shared && (action_flags & MLX5_FLOW_ACTION_AGE) &&
 	    !priv->sh->flow_hit_aso_en)
 		return rte_flow_error_set(error, EINVAL,
 					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
@@ -3400,6 +3487,57 @@ flow_dv_validate_action_raw_encap_decap
 	return 0;
 }
 
+/*
+ * Validate the ASO CT action.
+ *
+ * @param[in] dev
+ *   Pointer to the rte_eth_dev structure.
+ * @param[in] action_flags
+ *   Holds the actions detected until now.
+ * @param[in] item_flags
+ *   The items found in this flow rule.
+ * @param[in] attr
+ *   Pointer to flow attributes.
+ * @param[out] error
+ *   Pointer to error structure.
+ *
+ * @return
+ *   0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev,
+			       uint64_t action_flags,
+			       uint64_t item_flags,
+			       const struct rte_flow_attr *attr,
+			       struct rte_flow_error *error)
+{
+	RTE_SET_USED(dev);
+
+	if (attr->group == 0 && !attr->transfer)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "Only supported on non-root tables");
+	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "CT cannot follow a fate action");
+	if ((action_flags & MLX5_FLOW_ACTION_METER) ||
+	    (action_flags & MLX5_FLOW_ACTION_AGE))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Only one ASO action is supported");
+	if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Encap cannot exist before CT");
+	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
+		return rte_flow_error_set(error, EINVAL,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+					  "Not an outer TCP packet");
+	return 0;
+}
+
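The ordering rules enforced above (CT only on non-root tables, placed before any fate action, on flows carrying an outer TCP header) would be satisfied by a rule shaped like this sketch; the group numbers, the helper name and the simplified CT profile are illustrative assumptions:

#include <rte_flow.h>

/* Hypothetical helper: register a CT context and jump to the CT table. */
static struct rte_flow *
ct_action_flow_sketch(uint16_t port_id, uint16_t peer_port_id,
		      struct rte_flow_error *error)
{
	/* CT works on non-root tables only, hence group 1 (assumption). */
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	struct rte_flow_action_conntrack profile = {
		.peer_port = peer_port_id,
		.is_original_dir = 1,
		.enable = 1,
		.state = RTE_FLOW_CONNTRACK_STATE_ESTABLISHED,
		/* TCP window/sequence fields omitted for brevity. */
	};
	const struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
	const struct rte_flow_action ct_action = {
		.type = RTE_FLOW_ACTION_TYPE_CONNTRACK,
		.conf = &profile,
	};
	struct rte_flow_action_handle *handle;
	const struct rte_flow_action_jump jump = { .group = 2 };
	const struct rte_flow_item pattern[] = {
		/* An outer TCP header is mandatory for the CT action. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		/* CT must come before the fate action (JUMP here). */
		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = NULL },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	handle = rte_flow_action_handle_create(port_id, &indir_conf,
					       &ct_action, error);
	if (handle == NULL)
		return NULL;
	actions[0].conf = handle;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}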
 /**
  * Match encap_decap resource.
  *
@@ -5254,7 +5392,7 @@ flow_dv_validate_action_sample(uint64_t *action_flags,
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			ret = flow_dv_validate_action_count
-				(dev, act,
+				(dev, is_shared_action_count(act),
 				 *action_flags | sub_action_flags,
 				 error);
 			if (ret < 0)
@@ -5424,7 +5562,7 @@ flow_dv_modify_hdr_resource_register
  * @param[in] idx
  *   mlx5 flow counter index in the container.
  * @param[out] ppool
- *   mlx5 flow counter pool in the container,
+ *   mlx5 flow counter pool in the container.
  *
  * @return
  *   Pointer to the counter, NULL otherwise.
@@ -5554,7 +5692,7 @@ flow_dv_container_resize(struct rte_eth_dev *dev)
  *
  * @param[in] dev
  *   Pointer to the Ethernet device structure.
- * @param[in] cnt
+ * @param[in] counter
  *   Index to the flow counter.
  * @param[out] pkts
  *   The statistics value of packets.
@@ -5795,6 +5933,13 @@ flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t age)
 	if (!fallback && !priv->sh->cmng.query_thread_on)
 		/* Start the asynchronous batch query by the host thread. */
 		mlx5_set_query_alarm(priv->sh);
+	/*
+	 * When the count action isn't shared by ID, the shared_info field is
+	 * used for the indirect action API's reference counting.
+	 * When the count action is shared neither by ID nor through the
+	 * indirect action API, the reference count must be 1.
+	 */
+	cnt_free->shared_info.refcnt = 1;
 	return cnt_idx;
 err:
 	if (cnt_free) {
@@ -5941,9 +6086,26 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
 		return;
 	cnt = flow_dv_counter_get_by_idx(dev, counter, &pool);
 	MLX5_ASSERT(pool);
-	if (IS_SHARED_CNT(counter) &&
+	/*
+	 * If the counter action is shared by ID, the l3t_clear_entry function
+	 * reduces its reference count. If the action is still referenced
+	 * after the reduction, the function returns here and does not
+	 * release it.
+	 */
+	if (IS_LEGACY_SHARED_CNT(counter) &&
 	    mlx5_l3t_clear_entry(priv->sh->cnt_id_tbl, cnt->shared_info.id))
 		return;
+	/*
+	 * If the counter action is shared through the indirect action API,
+	 * the atomic function reduces its reference count. If the action is
+	 * still referenced after the reduction, the function returns here
+	 * and does not release it.
+	 * When the counter action is shared neither by ID nor through the
+	 * indirect action API, the reference count is 1 before the reduction,
+	 * so the condition fails and the function does not return here.
+	 */
+	if (!IS_LEGACY_SHARED_CNT(counter) &&
+	    __atomic_sub_fetch(&cnt->shared_info.refcnt, 1, __ATOMIC_RELAXED))
+		return;
 	if (pool->is_aged)
 		flow_dv_counter_remove_from_age(dev, counter, cnt);
 	cnt->pool = pool;
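The reference count initialized above backs the indirect count action lifecycle at the rte_flow API level, which a caller could exercise roughly as follows; the helper name and the printf reporting are illustrative assumptions:

#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

/* Hypothetical helper: create, query and destroy an indirect counter. */
static void
indirect_count_sketch(uint16_t port_id)
{
	struct rte_flow_error error;
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count stats = { .reset = 0 };
	struct rte_flow_action_handle *handle;

	/* Creation leaves the driver-internal reference count at 1. */
	handle = rte_flow_action_handle_create(port_id, &conf, &action,
					       &error);
	if (handle == NULL)
		return;
	/* Each flow rule that uses the handle adds one reference. */
	if (rte_flow_action_handle_query(port_id, handle, &stats, &error) == 0)
		printf("hits: %" PRIu64 ", bytes: %" PRIu64 "\n",
		       stats.hits, stats.bytes);
	/* Destroy fails with EBUSY while any rule still holds a reference. */
	rte_flow_action_handle_destroy(port_id, handle, &error);
}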
@@ -5955,7 +6117,6 @@ flow_dv_counter_free(struct rte_eth_dev *dev, uint32_t counter)
 	 * container counter list. The list changes while query starts. In
 	 * this case, lock will not be needed as query callback and release
 	 * function both operate with the different list.
-	 *
 	 */
 	if (!priv->sh->cmng.counter_fallback) {
 		rte_spinlock_lock(&pool->csl);
@@ -6230,6 +6391,158 @@ flow_dv_validate_attributes(struct rte_eth_dev *dev,
 	return ret;
 }
 
+static uint16_t
+mlx5_flow_locate_proto_l3(const struct rte_flow_item **head,
+			  const struct rte_flow_item *end)
+{
+	const struct rte_flow_item *item = *head;
+	uint16_t l3_protocol;
+
+	for (; item != end; item++) {
+		switch (item->type) {
+		default:
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			l3_protocol = RTE_ETHER_TYPE_IPV4;
+			goto l3_ok;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			l3_protocol = RTE_ETHER_TYPE_IPV6;
+			goto l3_ok;
+		case RTE_FLOW_ITEM_TYPE_ETH:
+			if (item->mask && item->spec) {
+				MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth,
+							    type, item,
+							    l3_protocol);
+				if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+				    l3_protocol == RTE_ETHER_TYPE_IPV6)
+					goto l3_ok;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_VLAN:
+			if (item->mask && item->spec) {
+				MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_vlan,
+							    inner_type, item,
+							    l3_protocol);
+				if (l3_protocol == RTE_ETHER_TYPE_IPV4 ||
+				    l3_protocol == RTE_ETHER_TYPE_IPV6)
+					goto l3_ok;
+			}
+			break;
+		}
+	}
+	return 0;
+l3_ok:
+	*head = item;
+	return l3_protocol;
+}
+
+static uint8_t
+mlx5_flow_locate_proto_l4(const struct rte_flow_item **head,
+			  const struct rte_flow_item *end)
+{
+	const struct rte_flow_item *item = *head;
+	uint8_t l4_protocol;
+
+	for (; item != end; item++) {
+		switch (item->type) {
+		default:
+			break;
+		case RTE_FLOW_ITEM_TYPE_TCP:
+			l4_protocol = IPPROTO_TCP;
+			goto l4_ok;
+		case RTE_FLOW_ITEM_TYPE_UDP:
+			l4_protocol = IPPROTO_UDP;
+			goto l4_ok;
+		case RTE_FLOW_ITEM_TYPE_IPV4:
+			if (item->mask && item->spec) {
+				const struct rte_flow_item_ipv4 *mask, *spec;
+
+				mask = (typeof(mask))item->mask;
+				spec = (typeof(spec))item->spec;
+				l4_protocol = mask->hdr.next_proto_id &
+					      spec->hdr.next_proto_id;
+				if (l4_protocol == IPPROTO_TCP ||
+				    l4_protocol == IPPROTO_UDP)
+					goto l4_ok;
+			}
+			break;
+		case RTE_FLOW_ITEM_TYPE_IPV6:
+			if (item->mask && item->spec) {
+				const struct rte_flow_item_ipv6 *mask, *spec;
+
+				mask = (typeof(mask))item->mask;
+				spec = (typeof(spec))item->spec;
+				l4_protocol = mask->hdr.proto & spec->hdr.proto;
+				if (l4_protocol == IPPROTO_TCP ||
+				    l4_protocol == IPPROTO_UDP)
+					goto l4_ok;
+			}
+			break;
+		}
+	}
+	return 0;
+l4_ok:
+	*head = item;
+	return l4_protocol;
+}
+
+static int
+flow_dv_validate_item_integrity(struct rte_eth_dev *dev,
+				const struct rte_flow_item *rule_items,
+				const struct rte_flow_item *integrity_item,
+				struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	const struct rte_flow_item *tunnel_item, *end_item, *item = rule_items;
+	const struct rte_flow_item_integrity *mask = (typeof(mask))
+						     integrity_item->mask;
+	const struct rte_flow_item_integrity *spec = (typeof(spec))
+						     integrity_item->spec;
+	uint32_t protocol;
+
+	if (!priv->config.hca_attr.pkt_integrity_match)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  integrity_item,
+					  "packet integrity item not supported");
+	if (!mask)
+		mask = &rte_flow_item_integrity_mask;
+	if (!mlx5_validate_integrity_item(mask))
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ITEM,
+					  integrity_item,
+					  "unsupported integrity filter");
+	tunnel_item = mlx5_flow_find_tunnel_item(rule_items);
+	if (spec->level > 1) {
+		if (!tunnel_item)
+			return rte_flow_error_set(error, ENOTSUP,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  integrity_item,
+						  "missing tunnel item");
+		item = tunnel_item;
+		end_item = mlx5_find_end_item(tunnel_item);
+	} else {
+		end_item = tunnel_item ? tunnel_item :
+			mlx5_find_end_item(integrity_item);
+	}
+	if (mask->l3_ok || mask->ipv4_csum_ok) {
+		protocol = mlx5_flow_locate_proto_l3(&item, end_item);
+		if (!protocol)
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  integrity_item,
+						  "missing L3 protocol");
+	}
+	if (mask->l4_ok || mask->l4_csum_ok) {
+		protocol = mlx5_flow_locate_proto_l4(&item, end_item);
+		if (!protocol)
+			return rte_flow_error_set(error, EINVAL,
+						  RTE_FLOW_ERROR_TYPE_ITEM,
+						  integrity_item,
+						  "missing L4 protocol");
+	}
+	return 0;
+}
+
 /**
  * Internal validation function. For validating both actions and items.
  *
@@ -6274,7 +6587,6 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	const struct rte_flow_action_raw_encap *encap;
 	const struct rte_flow_action_rss *rss = NULL;
 	const struct rte_flow_action_rss *sample_rss = NULL;
-	const struct rte_flow_action_count *count = NULL;
 	const struct rte_flow_action_count *sample_count = NULL;
 	const struct rte_flow_item_tcp nic_tcp_mask = {
 		.hdr = {
@@ -6321,6 +6633,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		.fdb_def_rule = !!priv->fdb_def_rule,
 	};
 	const struct rte_eth_hairpin_conf *conf;
+	const struct rte_flow_item *rule_items = items;
 	bool def_policy = false;
 
 	if (items == NULL)
@@ -6644,6 +6957,24 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 				return ret;
 			last_item = MLX5_FLOW_LAYER_ECPRI;
 			break;
+		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+			if (item_flags & MLX5_FLOW_ITEM_INTEGRITY)
+				return rte_flow_error_set
+					(error, ENOTSUP,
+					 RTE_FLOW_ERROR_TYPE_ITEM,
+					 NULL, "multiple integrity items not supported");
+			ret = flow_dv_validate_item_integrity(dev, rule_items,
+							      items, error);
+			if (ret < 0)
+				return ret;
+			last_item = MLX5_FLOW_ITEM_INTEGRITY;
+			break;
+		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+			ret = flow_dv_validate_item_aso_ct(dev, items,
+							   &item_flags, error);
+			if (ret < 0)
+				return ret;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ITEM,
@@ -6653,6 +6984,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 	}
 	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
 		int type = actions->type;
+		bool shared_count = false;
 
 		if (!mlx5_flow_os_action_supported(type))
 			return rte_flow_error_set(error, ENOTSUP,
@@ -6802,13 +7134,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
 			++actions_n;
 			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = flow_dv_validate_action_count(dev, actions,
+			shared_count = is_shared_action_count(actions);
+			ret = flow_dv_validate_action_count(dev, shared_count,
 							    action_flags,
 							    error);
 			if (ret < 0)
 				return ret;
-			count = actions->conf;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
 			++actions_n;
 			break;
@@ -7102,6 +7435,12 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					 NULL,
 					 "Shared ASO age action is not supported for group 0");
+			if (action_flags & MLX5_FLOW_ACTION_AGE)
+				return rte_flow_error_set
+					(error, EINVAL,
+					 RTE_FLOW_ERROR_TYPE_ACTION,
+					 NULL,
+					 "duplicate age actions set");
 			action_flags |= MLX5_FLOW_ACTION_AGE;
 			++actions_n;
 			break;
@@ -7116,7 +7455,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			 * mutual exclusion with share counter actions.
 			 */
 			if (!priv->sh->flow_hit_aso_en) {
-				if (count && count->shared)
+				if (shared_count)
 					return rte_flow_error_set
 						(error, EINVAL,
 						RTE_FLOW_ERROR_TYPE_ACTION,
@@ -7204,6 +7543,14 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
 			rw_act_num += ret;
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			ret = flow_dv_validate_action_aso_ct(dev, action_flags,
+							     item_flags, attr,
+							     error);
+			if (ret < 0)
+				return ret;
+			action_flags |= MLX5_FLOW_ACTION_CT;
+			break;
 		default:
 			return rte_flow_error_set(error, ENOTSUP,
 						  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -7439,6 +7786,7 @@ flow_dv_prepare(struct rte_eth_dev *dev,
 	struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 
 	MLX5_ASSERT(wks);
+	wks->skip_matcher_reg = 0;
 	/* In case of corrupting the memory. */
 	if (wks->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
 		rte_flow_error_set(error, ENOSPC,
@@ -9377,6 +9725,64 @@ flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
 	}
 }
 
+/*
+ * Add connection tracking status item to matcher.
+ *
+ * @param[in] dev
+ *   The device to configure through.
+ * @param[in, out] matcher
+ *   Flow matcher.
+ * @param[in, out] key
+ *   Flow matcher value.
+ * @param[in] item
+ *   Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_aso_ct(struct rte_eth_dev *dev,
+			      void *matcher, void *key,
+			      const struct rte_flow_item *item)
+{
+	uint32_t reg_value = 0;
+	int reg_id;
+	/* 8 LSB 0b 11/0000/11, middle 4 bits are reserved. */
+	uint32_t reg_mask = 0;
+	const struct rte_flow_item_conntrack *spec = item->spec;
+	const struct rte_flow_item_conntrack *mask = item->mask;
+	uint32_t flags;
+	struct rte_flow_error error;
+
+	if (!mask)
+		mask = &rte_flow_item_conntrack_mask;
+	if (!spec || !mask->flags)
+		return;
+	flags = spec->flags & mask->flags;
+	/* The conflict should be checked in the validation. */
+	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
+		reg_value |= MLX5_CT_SYNDROME_VALID;
+	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
+	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
+		reg_value |= MLX5_CT_SYNDROME_INVALID;
+	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
+		reg_value |= MLX5_CT_SYNDROME_TRAP;
+	if (flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
+	if (mask->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
+			   RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
+			   RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
+		reg_mask |= 0xc0;
+	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
+		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
+	if (mask->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
+		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
+	/* The REG_C_x value could be saved during startup. */
+	reg_id = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, &error);
+	if (reg_id == REG_NON)
+		return;
+	flow_dv_match_meta_reg(matcher, key, (enum modify_reg)reg_id,
+			       reg_value, reg_mask);
+}
+
 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
 
 #define HEADER_IS_ZERO(match_criteria, headers)				     \
@@ -9931,6 +10337,8 @@ flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
  * @param[out] count
  *   Pointer to the counter action configuration.
 * @param[in] age
@@ -9954,7 +10362,7 @@ flow_dv_translate_create_counter(struct rte_eth_dev *dev,
 	counter = flow_dv_counter_alloc(dev, !!age);
 	if (!counter || age == NULL)
 		return counter;
-	age_param  = flow_dv_counter_idx_get_age(dev, counter);
+	age_param = flow_dv_counter_idx_get_age(dev, counter);
 	age_param->context = age->context ? age->context :
 		(void *)(uintptr_t)(dev_flow->flow_idx);
 	age_param->timeout = age->timeout;
@@ -11118,21 +11526,457 @@ flow_dv_translate_create_aso_age(struct rte_eth_dev *dev,
 	return age_idx;
 }
 
+static void
+flow_dv_translate_integrity_l4(const struct rte_flow_item_integrity *mask,
+			       const struct rte_flow_item_integrity *value,
+			       void *headers_m, void *headers_v)
+{
+	if (mask->l4_ok) {
+		/* The application l4_ok filter aggregates all hardware l4
+		 * filters, therefore the hw l4_checksum_ok filter must be
+		 * implicitly added here.
+		 */
+		struct rte_flow_item_integrity local_item;
+
+		local_item.l4_csum_ok = 1;
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+			 local_item.l4_csum_ok);
+		if (value->l4_ok) {
+			/* An application l4_ok = 1 match sets both hw flags,
+			 * l4_ok and l4_checksum_ok, to 1.
+			 */
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 l4_checksum_ok, local_item.l4_csum_ok);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_ok,
+				 mask->l4_ok);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_ok,
+				 value->l4_ok);
+		} else {
+			/* An application l4_ok = 0 match is done on the hw
+			 * flag l4_checksum_ok = 0 only.
+			 */
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 l4_checksum_ok, 0);
+		}
+	} else if (mask->l4_csum_ok) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, l4_checksum_ok,
+			 mask->l4_csum_ok);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_checksum_ok,
+			 value->l4_csum_ok);
+	}
+}
+
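Matching on these integrity bits from the application side could look like the sketch below; per the validation earlier in this patch, the pattern must carry the L3/L4 items that give l3_ok/l4_ok their context. The queue index and helper name are illustrative assumptions:

#include <rte_flow.h>

/* Hypothetical helper: accept only packets that passed HW header checks. */
static struct rte_flow *
integrity_flow_sketch(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_integrity spec = {
		.level = 0,	/* outer headers */
		.l3_ok = 1,
		.l4_ok = 1,
		.ipv4_csum_ok = 1,
	};
	const struct rte_flow_item_integrity mask = {
		.l3_ok = 1,
		.l4_ok = 1,
		.ipv4_csum_ok = 1,
	};
	const struct rte_flow_item pattern[] = {
		/* IPv4 + TCP provide the L3/L4 context the validation demands. */
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_INTEGRITY,
		  .spec = &spec, .mask = &mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}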
+static void
+flow_dv_translate_integrity_l3(const struct rte_flow_item_integrity *mask,
+			       const struct rte_flow_item_integrity *value,
+			       void *headers_m, void *headers_v,
+			       bool is_ipv4)
+{
+	if (mask->l3_ok) {
+		/* The application l3_ok filter aggregates all hardware l3
+		 * filters, therefore the hw ipv4_checksum_ok filter must be
+		 * implicitly added here.
+		 */
+		struct rte_flow_item_integrity local_item;
+
+		local_item.ipv4_csum_ok = !!is_ipv4;
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+			 local_item.ipv4_csum_ok);
+		if (value->l3_ok) {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 ipv4_checksum_ok, local_item.ipv4_csum_ok);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_m, l3_ok,
+				 mask->l3_ok);
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v, l3_ok,
+				 value->l3_ok);
+		} else {
+			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+				 ipv4_checksum_ok, 0);
+		}
+	} else if (mask->ipv4_csum_ok) {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ipv4_checksum_ok,
+			 mask->ipv4_csum_ok);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ipv4_checksum_ok,
+			 value->ipv4_csum_ok);
+	}
+}
+
+static void
+flow_dv_translate_item_integrity(void *matcher, void *key,
+				 const struct rte_flow_item *head_item,
+				 const struct rte_flow_item *integrity_item)
+{
+	const struct rte_flow_item_integrity *mask = integrity_item->mask;
+	const struct rte_flow_item_integrity *value = integrity_item->spec;
+	const struct rte_flow_item *tunnel_item, *end_item, *item;
+	void *headers_m;
+	void *headers_v;
+	uint32_t l3_protocol;
+
+	if (!value)
+		return;
+	if (!mask)
+		mask = &rte_flow_item_integrity_mask;
+	if (value->level > 1) {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 inner_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+	} else {
+		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+					 outer_headers);
+		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+	}
+	tunnel_item = mlx5_flow_find_tunnel_item(head_item);
+	if (value->level > 1) {
+		/* tunnel item was verified during the item validation */
+		item = tunnel_item;
+		end_item = mlx5_find_end_item(tunnel_item);
+	} else {
+		item = head_item;
+		end_item = tunnel_item ? tunnel_item :
+			mlx5_find_end_item(integrity_item);
+	}
+	l3_protocol = mask->l3_ok ?
+		      mlx5_flow_locate_proto_l3(&item, end_item) : 0;
+	flow_dv_translate_integrity_l3(mask, value, headers_m, headers_v,
+				       l3_protocol == RTE_ETHER_TYPE_IPV4);
+	flow_dv_translate_integrity_l4(mask, value, headers_m, headers_v);
+}
+
 /**
- * Fill the flow with DV spec, lock free
- * (mutex should be acquired by caller).
+ * Prepares a DV flow counter with aging configuration.
+ * Gets the counter by index when it exists, creates a new one otherwise.
  *
  * @param[in] dev
  *   Pointer to rte_eth_dev structure.
- * @param[in, out] dev_flow
+ * @param[in] dev_flow
+ *   Pointer to the mlx5_flow.
+ * @param[in, out] flow
  *   Pointer to the sub flow.
- * @param[in] attr
- *   Pointer to the flow attributes.
- * @param[in] items
- *   Pointer to the list of items.
- * @param[in] actions
- *   Pointer to the list of actions.
- * @param[out] error
+ * @param[in] count
+ *   Pointer to the counter action configuration.
+ * @param[in] age
+ *   Pointer to the aging action configuration.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Pointer to the counter, NULL otherwise.
+ */
+static struct mlx5_flow_counter *
+flow_dv_prepare_counter(struct rte_eth_dev *dev,
+			struct mlx5_flow *dev_flow,
+			struct rte_flow *flow,
+			const struct rte_flow_action_count *count,
+			const struct rte_flow_action_age *age,
+			struct rte_flow_error *error)
+{
+	if (!flow->counter) {
+		flow->counter = flow_dv_translate_create_counter(dev, dev_flow,
+								 count, age);
+		if (!flow->counter) {
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "cannot create counter object.");
+			return NULL;
+		}
+	}
+	return flow_dv_counter_get_by_idx(dev, flow->counter, NULL);
+}
+
+/*
+ * Release an ASO CT action via its owner device.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   Index of ASO CT action to release.
+ *
+ * @return
+ *   0 when CT action was removed, otherwise the number of references.
+ */
+static inline int
+flow_dv_aso_ct_dev_release(struct rte_eth_dev *dev, uint32_t idx)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+	uint32_t ret;
+	struct mlx5_aso_ct_action *ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+	enum mlx5_aso_ct_state state =
+			__atomic_load_n(&ct->state, __ATOMIC_RELAXED);
+
+	/* Cannot release when CT is in the ASO SQ. */
+	if (state == ASO_CONNTRACK_WAIT || state == ASO_CONNTRACK_QUERY)
+		return -1;
+	ret = __atomic_sub_fetch(&ct->refcnt, 1, __ATOMIC_RELAXED);
+	if (!ret) {
+		if (ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+			claim_zero(mlx5_glue->destroy_flow_action
+							(ct->dr_action_orig));
+#endif
+			ct->dr_action_orig = NULL;
+		}
+		if (ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+			claim_zero(mlx5_glue->destroy_flow_action
+							(ct->dr_action_rply));
+#endif
+			ct->dr_action_rply = NULL;
+		}
+		/* Clear the state to free, no need in 1st allocation. */
+		MLX5_ASO_CT_UPDATE_STATE(ct, ASO_CONNTRACK_FREE);
+		rte_spinlock_lock(&mng->ct_sl);
+		LIST_INSERT_HEAD(&mng->free_cts, ct, next);
+		rte_spinlock_unlock(&mng->ct_sl);
+	}
+	return (int)ret;
+}
+
+static inline int
+flow_dv_aso_ct_release(struct rte_eth_dev *dev, uint32_t own_idx)
+{
+	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
+	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
+	struct rte_eth_dev *owndev = &rte_eth_devices[owner];
+	RTE_SET_USED(dev);
+
+	MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
+	if (dev->data->dev_started != 1)
+		return -1;
+	return flow_dv_aso_ct_dev_release(owndev, idx);
+}
+
+/*
+ * Resize the ASO CT pools array by 64 pools.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value and rte_errno is set.
+ */
+static int
+flow_dv_aso_ct_pools_resize(struct rte_eth_dev *dev)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+	void *old_pools = mng->pools;
+	/* Magic number now, need a macro. */
+	uint32_t resize = mng->n + 64;
+	uint32_t mem_size = sizeof(struct mlx5_aso_ct_pool *) * resize;
+	void *pools = mlx5_malloc(MLX5_MEM_ZERO, mem_size, 0, SOCKET_ID_ANY);
+
+	if (!pools) {
+		rte_errno = ENOMEM;
+		return -rte_errno;
+	}
+	rte_rwlock_write_lock(&mng->resize_rwl);
+	/* ASO SQ/QP was already initialized in the startup. */
+	if (old_pools) {
+		/* Realloc could be an alternative choice. */
+		rte_memcpy(pools, old_pools,
+			   mng->n * sizeof(struct mlx5_aso_ct_pool *));
+		mlx5_free(old_pools);
+	}
+	mng->n = resize;
+	mng->pools = pools;
+	rte_rwlock_write_unlock(&mng->resize_rwl);
+	return 0;
+}
+
+/*
+ * Create and initialize a new ASO CT pool.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] ct_free
+ *   Where to put the pointer of a new CT action.
+ *
+ * @return
+ *   The CT actions pool pointer, with @p ct_free set, on success,
+ *   NULL otherwise and rte_errno is set.
+ */
+static struct mlx5_aso_ct_pool *
+flow_dv_ct_pool_create(struct rte_eth_dev *dev,
+		       struct mlx5_aso_ct_action **ct_free)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+	struct mlx5_aso_ct_pool *pool = NULL;
+	struct mlx5_devx_obj *obj = NULL;
+	uint32_t i;
+	uint32_t log_obj_size = rte_log2_u32(MLX5_ASO_CT_ACTIONS_PER_POOL);
+
+	obj = mlx5_devx_cmd_create_conn_track_offload_obj(priv->sh->ctx,
+						priv->sh->pdn, log_obj_size);
+	if (!obj) {
+		rte_errno = ENODATA;
+		DRV_LOG(ERR, "Failed to create conn_track_offload_obj using DevX.");
+		return NULL;
+	}
+	pool = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*pool), 0, SOCKET_ID_ANY);
+	if (!pool) {
+		rte_errno = ENOMEM;
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		return NULL;
+	}
+	pool->devx_obj = obj;
+	pool->index = mng->next;
+	/* Resize pools array if there is no room for the new pool in it. */
+	if (pool->index == mng->n && flow_dv_aso_ct_pools_resize(dev)) {
+		claim_zero(mlx5_devx_cmd_destroy(obj));
+		mlx5_free(pool);
+		return NULL;
+	}
+	mng->pools[pool->index] = pool;
+	mng->next++;
+	/* Assign the first action in the new pool, the rest go to free list. */
+	*ct_free = &pool->actions[0];
+	/* Lock outside, the list operation is safe here. */
+	for (i = 1; i < MLX5_ASO_CT_ACTIONS_PER_POOL; i++) {
+		/* refcnt is 0 when allocating the memory. */
+		pool->actions[i].offset = i;
+		LIST_INSERT_HEAD(&mng->free_cts, &pool->actions[i], next);
+	}
+	return pool;
+}
+
+/*
+ * Allocate an ASO CT action from the free list.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Index to ASO CT action on success, 0 otherwise and rte_errno is set.
+ */
+static uint32_t
+flow_dv_aso_ct_alloc(struct rte_eth_dev *dev, struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
+	struct mlx5_aso_ct_action *ct = NULL;
+	struct mlx5_aso_ct_pool *pool;
+	uint8_t reg_c;
+	uint32_t ct_idx;
+
+	MLX5_ASSERT(mng);
+	if (!priv->config.devx) {
+		rte_errno = ENOTSUP;
+		return 0;
+	}
+	/* Get a free CT action; if there is none, a new pool will be created. */
+	rte_spinlock_lock(&mng->ct_sl);
+	ct = LIST_FIRST(&mng->free_cts);
+	if (ct) {
+		LIST_REMOVE(ct, next);
+	} else if (!flow_dv_ct_pool_create(dev, &ct)) {
+		rte_spinlock_unlock(&mng->ct_sl);
+		rte_flow_error_set(error, rte_errno, RTE_FLOW_ERROR_TYPE_ACTION,
				   NULL, "failed to create ASO CT pool");
+		return 0;
+	}
+	rte_spinlock_unlock(&mng->ct_sl);
+	pool = container_of(ct, struct mlx5_aso_ct_pool, actions[ct->offset]);
+	ct_idx = MLX5_MAKE_CT_IDX(pool->index, ct->offset);
+	/* 0: inactive, 1: created, 2+: used by flows. */
+	__atomic_store_n(&ct->refcnt, 1, __ATOMIC_RELAXED);
+	reg_c = mlx5_flow_get_reg_id(dev, MLX5_ASO_CONNTRACK, 0, error);
+	if (!ct->dr_action_orig) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+		ct->dr_action_orig = mlx5_glue->dv_create_flow_action_aso
+			(priv->sh->rx_domain, pool->devx_obj->obj,
+			 ct->offset,
+			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR,
+			 reg_c - REG_C_0);
+#else
+		RTE_SET_USED(reg_c);
+#endif
+		if (!ct->dr_action_orig) {
+			flow_dv_aso_ct_dev_release(dev, ct_idx);
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "failed to create ASO CT action");
+			return 0;
+		}
+	}
+	if (!ct->dr_action_rply) {
+#ifdef HAVE_MLX5_DR_ACTION_ASO_CT
+		ct->dr_action_rply = mlx5_glue->dv_create_flow_action_aso
+			(priv->sh->rx_domain, pool->devx_obj->obj,
+			 ct->offset,
+			 MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER,
+			 reg_c - REG_C_0);
+#endif
+		if (!ct->dr_action_rply) {
+			flow_dv_aso_ct_dev_release(dev, ct_idx);
+			rte_flow_error_set(error, rte_errno,
+					   RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					   "failed to create ASO CT action");
+			return 0;
+		}
+	}
+	return ct_idx;
+}
+
+/*
+ * Create a conntrack object with context and actions by using ASO mechanism.
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in] pro
+ *   Pointer to conntrack information profile.
+ * @param[out] error
+ *   Pointer to the error structure.
+ *
+ * @return
+ *   Index to conntrack object on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_conntrack(struct rte_eth_dev *dev,
+				   const struct rte_flow_action_conntrack *pro,
+				   struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_dev_ctx_shared *sh = priv->sh;
+	struct mlx5_aso_ct_action *ct;
+	uint32_t idx;
+
+	if (!sh->ct_aso_en)
+		return rte_flow_error_set(error, ENOTSUP,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Connection tracking is not supported");
+	idx = flow_dv_aso_ct_alloc(dev, error);
+	if (!idx)
+		return rte_flow_error_set(error, rte_errno,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Failed to allocate CT object");
+	ct = flow_aso_ct_get_by_dev_idx(dev, idx);
+	if (mlx5_aso_ct_update_by_wqe(sh, ct, pro))
+		return rte_flow_error_set(error, EBUSY,
+					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+					  "Failed to update CT");
+	ct->is_original = !!pro->is_original_dir;
+	ct->peer = pro->peer_port;
+	return idx;
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (mutex should be acquired by caller).
+ *
+ * @param[in] dev
+ *   Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ *   Pointer to the sub flow.
+ * @param[in] attr
+ *   Pointer to the flow attributes.
+ * @param[in] items
+ *   Pointer to the list of items.
+ * @param[in] actions
+ *   Pointer to the list of actions.
+ * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
@@ -11171,7 +12015,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	} mhdr_dummy;
 	struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
 	const struct rte_flow_action_count *count = NULL;
-	const struct rte_flow_action_age *age = NULL;
+	const struct rte_flow_action_age *non_shared_age = NULL;
 	union flow_dv_attr flow_attr = { .attr = 0 };
 	uint32_t tag_be;
 	union mlx5_flow_tbl_key tbl_key;
@@ -11186,6 +12030,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	const struct rte_flow_action_sample *sample = NULL;
 	struct mlx5_flow_sub_actions_list *sample_act;
 	uint32_t sample_act_pos = UINT32_MAX;
+	uint32_t age_act_pos = UINT32_MAX;
 	uint32_t num_of_dest = 0;
 	int tmp_actions_n = 0;
 	uint32_t table;
@@ -11198,6 +12043,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		.skip_scale = dev_flow->skip_scale &
 			(1 << MLX5_SCALE_FLOW_GROUP_BIT),
 	};
+	const struct rte_flow_item *head_item = items;
 
 	if (!wks)
 		return rte_flow_error_set(error, ENOMEM,
@@ -11265,11 +12111,14 @@ flow_dv_translate(struct rte_eth_dev *dev,
 		const uint8_t *rss_key;
 		struct mlx5_flow_tbl_resource *tbl;
 		struct mlx5_aso_age_action *age_act;
+		struct mlx5_flow_counter *cnt_act;
 		uint32_t port_id = 0;
 		struct mlx5_flow_dv_port_id_action_resource port_id_resource;
 		int action_type = actions->type;
 		const struct rte_flow_action *found_action = NULL;
 		uint32_t jump_group = 0;
+		uint32_t owner_idx;
+		struct mlx5_aso_ct_action *ct;
 
 		if (!mlx5_flow_os_action_supported(action_type))
 			return rte_flow_error_set(error, ENOTSUP,
@@ -11407,34 +12256,23 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			age_act = flow_aso_age_get_by_idx(dev, flow->age);
 			__atomic_fetch_add(&age_act->refcnt, 1,
 					   __ATOMIC_RELAXED);
-			dev_flow->dv.actions[actions_n++] = age_act->dr_action;
+			age_act_pos = actions_n++;
 			action_flags |= MLX5_FLOW_ACTION_AGE;
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
-			if (priv->sh->flow_hit_aso_en && attr->group) {
-				/*
-				 * Create one shared age action, to be used
-				 * by all sub-flows.
-				 */
-				if (!flow->age) {
-					flow->age =
-						flow_dv_translate_create_aso_age
-							(dev, action->conf,
-							 error);
-					if (!flow->age)
-						return rte_flow_error_set
-						(error, rte_errno,
-						 RTE_FLOW_ERROR_TYPE_ACTION,
-						 NULL,
-						 "can't create ASO age action");
-				}
-				dev_flow->dv.actions[actions_n++] =
-					  (flow_aso_age_get_by_idx
						(dev, flow->age))->dr_action;
-				action_flags |= MLX5_FLOW_ACTION_AGE;
-				break;
-			}
-			/* Fall-through */
+			non_shared_age = action->conf;
+			age_act_pos = actions_n++;
+			action_flags |= MLX5_FLOW_ACTION_AGE;
+			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_COUNT:
+			flow->counter = (uint32_t)(uintptr_t)(action->conf);
+			cnt_act = flow_dv_counter_get_by_idx(dev, flow->counter,
							     NULL);
+			__atomic_fetch_add(&cnt_act->shared_info.refcnt, 1,
+					   __ATOMIC_RELAXED);
+			/* Save information first, will apply later. */
+			action_flags |= MLX5_FLOW_ACTION_COUNT;
+			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
 			if (!dev_conf->devx) {
 				return rte_flow_error_set
@@ -11444,10 +12282,7 @@ flow_dv_translate(struct rte_eth_dev *dev,
 					       "count action not supported");
 			}
 			/* Save information first, will apply later. */
-			if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
-				count = action->conf;
-			else
-				age = action->conf;
+			count = action->conf;
 			action_flags |= MLX5_FLOW_ACTION_COUNT;
 			break;
 		case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
@@ -11554,6 +12389,11 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			/* If decap is followed by encap, handle it at encap. */
 			action_flags |= MLX5_FLOW_ACTION_DECAP;
 			break;
+		case MLX5_RTE_FLOW_ACTION_TYPE_JUMP:
+			dev_flow->dv.actions[actions_n++] =
+				(void *)(uintptr_t)action->conf;
+			action_flags |= MLX5_FLOW_ACTION_JUMP;
+			break;
 		case RTE_FLOW_ACTION_TYPE_JUMP:
 			jump_group = ((const struct rte_flow_action_jump *)
							action->conf)->group;
@@ -11732,6 +12572,31 @@ flow_dv_translate(struct rte_eth_dev *dev,
 				return -rte_errno;
 			action_flags |= MLX5_FLOW_ACTION_MODIFY_FIELD;
 			break;
+		case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+			owner_idx = (uint32_t)(uintptr_t)action->conf;
+			ct = flow_aso_ct_get_by_idx(dev, owner_idx);
+			if (!ct)
+				return rte_flow_error_set(error, EINVAL,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"Failed to get CT object.");
+			if (mlx5_aso_ct_available(priv->sh, ct))
+				return rte_flow_error_set(error, rte_errno,
+						RTE_FLOW_ERROR_TYPE_ACTION,
+						NULL,
+						"CT is unavailable.");
+			if (ct->is_original)
+				dev_flow->dv.actions[actions_n] =
+							ct->dr_action_orig;
+			else
+				dev_flow->dv.actions[actions_n] =
+							ct->dr_action_rply;
+			flow->indirect_type = MLX5_INDIRECT_ACTION_TYPE_CT;
+			flow->ct = owner_idx;
+			__atomic_fetch_add(&ct->refcnt, 1, __ATOMIC_RELAXED);
+			actions_n++;
+			action_flags |= MLX5_FLOW_ACTION_CT;
+			break;
 		case RTE_FLOW_ACTION_TYPE_END:
 			actions_end = true;
 			if (mhdr_res->actions_num) {
@@ -11742,27 +12607,57 @@ flow_dv_translate(struct rte_eth_dev *dev,
 				dev_flow->dv.actions[modify_action_position] =
 					handle->dvh.modify_hdr->action;
 			}
+			/*
+			 * Handle AGE and COUNT action by single HW counter
+			 * when they are not shared.
+			 */
+			if (action_flags & MLX5_FLOW_ACTION_AGE) {
+				if ((non_shared_age &&
+				     count && !count->shared) ||
+				    !(priv->sh->flow_hit_aso_en &&
+				      attr->group)) {
+					/* Creates age by counters. */
+					cnt_act = flow_dv_prepare_counter
							(dev, dev_flow,
							 flow, count,
							 non_shared_age,
							 error);
+					if (!cnt_act)
+						return -rte_errno;
+					dev_flow->dv.actions[age_act_pos] =
								cnt_act->action;
+					break;
+				}
+				if (!flow->age && non_shared_age) {
+					flow->age =
+						flow_dv_translate_create_aso_age
							(dev,
							 non_shared_age,
							 error);
+					if (!flow->age)
+						return rte_flow_error_set
						    (error, rte_errno,
						     RTE_FLOW_ERROR_TYPE_ACTION,
						     NULL,
						     "can't create ASO age action");
+				}
+				age_act = flow_aso_age_get_by_idx(dev,
								  flow->age);
+				dev_flow->dv.actions[age_act_pos] =
							age_act->dr_action;
+			}
 			if (action_flags & MLX5_FLOW_ACTION_COUNT) {
 				/*
 				 * Create one count action, to be used
 				 * by all sub-flows.
 				 */
-				if (!flow->counter) {
-					flow->counter =
-						flow_dv_translate_create_counter
-							(dev, dev_flow, count,
-							 age);
-					if (!flow->counter)
-						return rte_flow_error_set
-						(error, rte_errno,
-						 RTE_FLOW_ERROR_TYPE_ACTION,
-						 NULL, "cannot create counter"
-						 " object.");
-				}
-				dev_flow->dv.actions[actions_n] =
-					  (flow_dv_counter_get_by_idx(dev,
-					   flow->counter, NULL))->action;
-				actions_n++;
+				cnt_act = flow_dv_prepare_counter(dev, dev_flow,
								  flow, count,
								  NULL, error);
+				if (!cnt_act)
+					return -rte_errno;
+				dev_flow->dv.actions[actions_n++] =
							cnt_act->action;
 			}
 		default:
 			break;
 		}
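The END-case consolidation above means a rule carrying non-shared AGE and COUNT actions is served by a single hardware counter. Such a rule could be built like this sketch; the timeout, queue index and group values are illustrative assumptions:

#include <rte_flow.h>

/* Hypothetical helper: one HW counter backs both aging and counting. */
static struct rte_flow *
age_and_count_sketch(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .group = 1, .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_age age = {
		.timeout = 10,	/* seconds, assumption */
	};
	const struct rte_flow_action_count count = { 0 }; /* not shared */
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_AGE, .conf = &age },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}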
@@ -12014,6 +12909,15 @@ flow_dv_translate(struct rte_eth_dev *dev,
 			/* No other protocol should follow eCPRI layer. */
 			last_item = MLX5_FLOW_LAYER_ECPRI;
 			break;
+		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
+			flow_dv_translate_item_integrity(match_mask,
+							 match_value,
+							 head_item, items);
+			break;
+		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
+			flow_dv_translate_item_aso_ct(dev, match_mask,
+						      match_value, items);
+			break;
 		default:
 			break;
 		}
@@ -12109,6 +13013,8 @@ flow_dv_translate(struct rte_eth_dev *dev,
 	}
 	dev_flow->dv.actions_n = actions_n;
 	dev_flow->act_flags = action_flags;
+	if (wks->skip_matcher_reg)
+		return 0;
 	/* Register matcher. */
 	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
 				    matcher.mask.size);
@@ -12865,7 +13771,10 @@ flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
 		mlx5_flow_meter_detach(priv, fm);
 		flow->meter = 0;
 	}
-	if (flow->age)
+	/* Keep the current age handling by default. */
+	if (flow->indirect_type == MLX5_INDIRECT_ACTION_TYPE_CT && flow->ct)
+		flow_dv_aso_ct_release(dev, flow->ct);
+	else if (flow->age)
 		flow_dv_aso_age_release(dev, flow->age);
 	if (flow->geneve_tlv_option) {
 		flow_dv_geneve_tlv_option_resource_release(dev);
@@ -13294,6 +14203,7 @@ flow_dv_action_create(struct rte_eth_dev *dev,
 {
 	uint32_t idx = 0;
 	uint32_t ret = 0;
+	struct mlx5_priv *priv = dev->data->dev_private;
 
 	switch (action->type) {
 	case RTE_FLOW_ACTION_TYPE_RSS:
@@ -13314,6 +14224,16 @@ flow_dv_action_create(struct rte_eth_dev *dev,
 				(void *)(uintptr_t)idx;
 		}
 		break;
+	case RTE_FLOW_ACTION_TYPE_COUNT:
+		ret = flow_dv_translate_create_counter(dev, NULL, NULL, NULL);
+		idx = (MLX5_INDIRECT_ACTION_TYPE_COUNT <<
+		       MLX5_INDIRECT_ACTION_TYPE_OFFSET) | ret;
+		break;
+	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+		ret = flow_dv_translate_create_conntrack(dev, action->conf,
+							 err);
+		idx = MLX5_INDIRECT_ACT_CT_GEN_IDX(PORT_ID(priv), ret);
+		break;
 	default:
 		rte_flow_error_set(err, ENOTSUP,
 				   RTE_FLOW_ERROR_TYPE_ACTION,
 				   NULL, "action type not supported");
@@ -13347,11 +14267,25 @@ flow_dv_action_destroy(struct rte_eth_dev *dev,
 	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
 	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
 	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
+	struct mlx5_flow_counter *cnt;
+	uint32_t no_flow_refcnt = 1;
 	int ret;
 
 	switch (type) {
 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
 		return __flow_dv_action_rss_release(dev, idx, error);
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		cnt = flow_dv_counter_get_by_idx(dev, idx, NULL);
+		if (!__atomic_compare_exchange_n(&cnt->shared_info.refcnt,
+						 &no_flow_refcnt, 1, false,
+						 __ATOMIC_ACQUIRE,
+						 __ATOMIC_RELAXED))
+			return rte_flow_error_set(error, EBUSY,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Indirect count action has references");
+		flow_dv_counter_free(dev, idx);
+		return 0;
 	case MLX5_INDIRECT_ACTION_TYPE_AGE:
 		ret = flow_dv_aso_age_release(dev, idx);
 		if (ret)
@@ -13362,6 +14296,14 @@ flow_dv_action_destroy(struct rte_eth_dev *dev,
 		DRV_LOG(DEBUG, "Indirect age action %" PRIu32 " was"
 			" released with references %d.", idx, ret);
 		return 0;
+	case MLX5_INDIRECT_ACTION_TYPE_CT:
+		ret = flow_dv_aso_ct_release(dev, idx);
+		if (ret < 0)
+			return ret;
+		if (ret > 0)
+			DRV_LOG(DEBUG, "Connection tracking object %u still "
+				"has references %d.", idx, ret);
+		return 0;
 	default:
 		return rte_flow_error_set(error, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -13436,6 +14378,72 @@ __flow_dv_action_rss_update(struct rte_eth_dev *dev, uint32_t idx,
 	return ret;
 }
 
+/*
+ * Update a conntrack context or direction in place.
+ * The context update should be synchronized.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
+ * @param[in] idx
+ *   The conntrack object ID to be updated.
+ * @param[in] update
+ *   Pointer to the structure of information to update.
+ * @param[out] error
+ *   Perform verbose error reporting if not NULL. Initialized in case of
+ *   error only.
+ *
+ * @return
+ *   0 on success, otherwise negative errno value.
+ */
+static int
+__flow_dv_action_ct_update(struct rte_eth_dev *dev, uint32_t idx,
+			   const struct rte_flow_modify_conntrack *update,
+			   struct rte_flow_error *error)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	struct mlx5_aso_ct_action *ct;
+	const struct rte_flow_action_conntrack *new_prf;
+	int ret = 0;
+	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+	uint32_t dev_idx;
+
+	if (PORT_ID(priv) != owner)
+		return rte_flow_error_set(error, EACCES,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "CT object owned by another port");
+	dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+	ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+	if (!ct->refcnt)
+		return rte_flow_error_set(error, ENOMEM,
+					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+					  NULL,
+					  "CT object is inactive");
+	new_prf = &update->new_ct;
+	if (update->direction)
+		ct->is_original = !!new_prf->is_original_dir;
+	if (update->state) {
+		/* Only validate the profile when it needs to be updated. */
+		ret = mlx5_validate_action_ct(dev, new_prf, error);
+		if (ret)
+			return ret;
+		ret = mlx5_aso_ct_update_by_wqe(priv->sh, ct, new_prf);
+		if (ret)
+			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to send CT context update WQE");
+		/* Block until ready or a failure. */
+		ret = mlx5_aso_ct_available(priv->sh, ct);
+		if (ret)
+			rte_flow_error_set(error, rte_errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "Timeout to get the CT update");
+	}
+	return ret;
+}
+
 /**
  * Updates in place shared action configuration, lock free,
  * (mutex should be acquired by caller).
@@ -13471,6 +14479,8 @@ flow_dv_action_update(struct rte_eth_dev *dev,
 	case MLX5_INDIRECT_ACTION_TYPE_RSS:
 		action_conf = ((const struct rte_flow_action *)update)->conf;
 		return __flow_dv_action_rss_update(dev, idx, action_conf, err);
+	case MLX5_INDIRECT_ACTION_TYPE_CT:
+		return __flow_dv_action_ct_update(dev, idx, update, err);
 	default:
 		return rte_flow_error_set(err, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,
@@ -13479,37 +14489,6 @@ flow_dv_action_update(struct rte_eth_dev *dev,
 	}
 }
 
-static int
-flow_dv_action_query(struct rte_eth_dev *dev,
-		     const struct rte_flow_action_handle *handle, void *data,
-		     struct rte_flow_error *error)
-{
-	struct mlx5_age_param *age_param;
-	struct rte_flow_query_age *resp;
-	uint32_t act_idx = (uint32_t)(uintptr_t)handle;
-	uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
-	uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
-
-	switch (type) {
-	case MLX5_INDIRECT_ACTION_TYPE_AGE:
-		age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params;
-		resp = data;
-		resp->aged = __atomic_load_n(&age_param->state,
-					     __ATOMIC_RELAXED) == AGE_TMOUT ?
-									  1 : 0;
-		resp->sec_since_last_hit_valid = !resp->aged;
-		if (resp->sec_since_last_hit_valid)
-			resp->sec_since_last_hit = __atomic_load_n
-				(&age_param->sec_since_last_hit, __ATOMIC_RELAXED);
-		return 0;
-	default:
-		return rte_flow_error_set(error, ENOTSUP,
-					  RTE_FLOW_ERROR_TYPE_ACTION,
-					  NULL,
-					  "action type query not supported");
-	}
-}
-
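A caller would reach this update path through the generic indirect-action API, for example to flip only the direction bit of an existing CT object. This is a minimal sketch, assuming a valid handle obtained earlier from rte_flow_action_handle_create():

#include <rte_flow.h>

/* Hypothetical helper: switch an existing CT object to the reply direction. */
static int
ct_direction_update_sketch(uint16_t port_id,
			   struct rte_flow_action_handle *ct_handle,
			   struct rte_flow_error *error)
{
	const struct rte_flow_modify_conntrack update = {
		.new_ct = { .is_original_dir = 0 },
		.direction = 1,	/* update the direction bit only */
		.state = 0,	/* leave the CT context untouched */
	};

	return rte_flow_action_handle_update(port_id, ct_handle, &update,
					     error);
}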
@@ -14061,14 +15040,14 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev, } /** - * Query a dv flow rule for its statistics via devx. + * Query a DV flow rule for its statistics via DevX. * * @param[in] dev * Pointer to Ethernet device. - * @param[in] flow - * Pointer to the sub flow. + * @param[in] cnt_idx + * Index to the flow counter. * @param[out] data - * data retrieved by the query. + * Data retrieved by the query. * @param[out] error * Perform verbose error reporting if not NULL. * @@ -14076,8 +15055,8 @@ flow_dv_create_mtr_policy_acts(struct rte_eth_dev *dev, * 0 on success, a negative errno value otherwise and rte_errno is set. */ static int -flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, - void *data, struct rte_flow_error *error) +flow_dv_query_count(struct rte_eth_dev *dev, uint32_t cnt_idx, void *data, + struct rte_flow_error *error) { struct mlx5_priv *priv = dev->data->dev_private; struct rte_flow_query_count *qc = data; @@ -14087,19 +15066,16 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "counters are not supported"); - if (flow->counter) { + if (cnt_idx) { uint64_t pkts, bytes; struct mlx5_flow_counter *cnt; - - cnt = flow_dv_counter_get_by_idx(dev, flow->counter, - NULL); - int err = _flow_dv_query_count(dev, flow->counter, &pkts, - &bytes); + int err = _flow_dv_query_count(dev, cnt_idx, &pkts, &bytes); if (err) return rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot read counters"); + cnt = flow_dv_counter_get_by_idx(dev, cnt_idx, NULL); qc->hits_set = 1; qc->bytes_set = 1; qc->hits = pkts - cnt->hits; @@ -14116,6 +15092,67 @@ flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow, "counters are not available"); } +static int +flow_dv_action_query(struct rte_eth_dev *dev, + const struct rte_flow_action_handle *handle, void *data, + struct rte_flow_error *error) +{ + struct mlx5_age_param *age_param; + struct rte_flow_query_age *resp; + uint32_t act_idx = (uint32_t)(uintptr_t)handle; + uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET; + uint32_t idx = act_idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1); + struct mlx5_priv *priv = dev->data->dev_private; + struct mlx5_aso_ct_action *ct; + uint16_t owner; + uint32_t dev_idx; + + switch (type) { + case MLX5_INDIRECT_ACTION_TYPE_AGE: + age_param = &flow_aso_age_get_by_idx(dev, idx)->age_params; + resp = data; + resp->aged = __atomic_load_n(&age_param->state, + __ATOMIC_RELAXED) == AGE_TMOUT ? 
+									  1 : 0;
+		resp->sec_since_last_hit_valid = !resp->aged;
+		if (resp->sec_since_last_hit_valid)
+			resp->sec_since_last_hit = __atomic_load_n
+				(&age_param->sec_since_last_hit,
+				 __ATOMIC_RELAXED);
+		return 0;
+	case MLX5_INDIRECT_ACTION_TYPE_COUNT:
+		return flow_dv_query_count(dev, idx, data, error);
+	case MLX5_INDIRECT_ACTION_TYPE_CT:
+		owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(idx);
+		if (owner != PORT_ID(priv))
+			return rte_flow_error_set(error, EACCES,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object owned by another port");
+		dev_idx = MLX5_INDIRECT_ACT_CT_GET_IDX(idx);
+		ct = flow_aso_ct_get_by_dev_idx(dev, dev_idx);
+		MLX5_ASSERT(ct);
+		if (!ct->refcnt)
+			return rte_flow_error_set(error, EFAULT,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"CT object is inactive");
+		((struct rte_flow_action_conntrack *)data)->peer_port =
							ct->peer;
+		((struct rte_flow_action_conntrack *)data)->is_original_dir =
							ct->is_original;
+		if (mlx5_aso_ct_query_by_wqe(priv->sh, ct, data))
+			return rte_flow_error_set(error, EIO,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL,
					"Failed to query CT context");
+		return 0;
+	default:
+		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "action type query not supported");
+	}
+}
+
 /**
  * Query a flow rule AGE action for aging information.
  *
@@ -14185,7 +15222,8 @@ flow_dv_query(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_VOID:
 			break;
 		case RTE_FLOW_ACTION_TYPE_COUNT:
-			ret = flow_dv_query_count(dev, flow, data, error);
+			ret = flow_dv_query_count(dev, flow->counter, data,
+						  error);
 			break;
 		case RTE_FLOW_ACTION_TYPE_AGE:
 			ret = flow_dv_query_age(dev, flow, data, error);
@@ -15226,7 +16264,7 @@ flow_dv_counter_allocate(struct rte_eth_dev *dev)
  * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] conf
- *   Shared action configuration.
+ *   Indirect action configuration.
 * @param[in] action
 *   The indirect action object to validate.
 * @param[out] error
@@ -15253,22 +16291,42 @@ flow_dv_action_validate(struct rte_eth_dev *dev,
 	 * sufficient, it is set to devx_obj_ops.
 	 * Otherwise, it is set to ibv_obj_ops.
 	 * ibv_obj_ops doesn't support ind_table_modify operation.
-	 * In this case the shared RSS action can't be used.
+	 * In this case the indirect RSS action can't be used.
 	 */
 	if (priv->obj_ops.ind_table_modify == NULL)
 		return rte_flow_error_set
					(err, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ACTION,
					 NULL,
-					 "shared RSS action not supported");
+					 "Indirect RSS action not supported");
 		return mlx5_validate_action_rss(dev, action, err);
 	case RTE_FLOW_ACTION_TYPE_AGE:
 		if (!priv->sh->aso_age_mng)
 			return rte_flow_error_set(err, ENOTSUP,
						RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						NULL,
-						"shared age action not supported");
+						"Indirect age action not supported");
 		return flow_dv_validate_action_age(0, action, dev, err);
+	case RTE_FLOW_ACTION_TYPE_COUNT:
+		/*
+		 * There are two mechanisms to share the action count.
+		 * The old mechanism uses the shared field to share, while the
+		 * new mechanism uses the indirect action API.
+		 * This validation makes sure that the two mechanisms
+		 * are not combined.
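The query path added above is reached through rte_flow_action_handle_query(); reading back a CT context could look like this minimal sketch (the helper name and the printf reporting are illustrative assumptions):

#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper: query a conntrack handle and report its state. */
static int
ct_query_sketch(uint16_t port_id,
		const struct rte_flow_action_handle *ct_handle)
{
	struct rte_flow_error error;
	struct rte_flow_action_conntrack ctx;
	int ret;

	ret = rte_flow_action_handle_query(port_id, ct_handle, &ctx, &error);
	if (ret == 0)
		printf("peer port %u, %s direction\n", ctx.peer_port,
		       ctx.is_original_dir ? "original" : "reply");
	return ret;
}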
+		 */
+		if (is_shared_action_count(action))
+			return rte_flow_error_set(err, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "Mixing shared and indirect counters is not supported");
+		return flow_dv_validate_action_count(dev, true, 0, err);
+	case RTE_FLOW_ACTION_TYPE_CONNTRACK:
+		if (!priv->sh->ct_aso_en)
+			return rte_flow_error_set(err, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					"ASO CT is not supported");
+		return mlx5_validate_action_ct(dev, action->conf, err);
 	default:
 		return rte_flow_error_set(err, ENOTSUP,
 					  RTE_FLOW_ERROR_TYPE_ACTION,