X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fmlx5%2Fmlx5_flow_dv.c;h=4a35010d8bf615d97c88f17778b1eccc985050a5;hb=6db1fde3891c493b9d352487fc8b6384cc6d06f3;hp=504d842c09f3c3a2fe7bb67d4ffc29c2f1868093;hpb=4ec6360de37dd92bf7411ec0f863cc13e5c0b0cc;p=dpdk.git

diff --git a/drivers/net/mlx5/mlx5_flow_dv.c b/drivers/net/mlx5/mlx5_flow_dv.c
index 504d842c09..4a35010d8b 100644
--- a/drivers/net/mlx5/mlx5_flow_dv.c
+++ b/drivers/net/mlx5/mlx5_flow_dv.c
@@ -33,6 +33,7 @@
 #include "mlx5_flow.h"
 #include "mlx5_flow_os.h"
 #include "mlx5_rxtx.h"
+#include "rte_pmd_mlx5.h"

 #ifdef HAVE_IBV_FLOW_DV_SUPPORT

@@ -1676,6 +1677,7 @@ flow_dv_validate_item_vlan(const struct rte_flow_item *item,
 	const struct rte_flow_item_vlan nic_mask = {
 		.tci = RTE_BE16(UINT16_MAX),
 		.inner_type = RTE_BE16(UINT16_MAX),
+		.has_more_vlan = 1,
 	};
 	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
 	int ret;
@@ -2872,8 +2874,10 @@ flow_dv_encap_decap_resource_register
 				struct mlx5_flow_dv_encap_decap_resource, entry);
 		DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
 			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
-		rte_atomic32_inc(&cache_resource->refcnt);
+			__atomic_load_n(&cache_resource->refcnt,
+					__ATOMIC_RELAXED));
+		__atomic_fetch_add(&cache_resource->refcnt, 1,
+				   __ATOMIC_RELAXED);
 		dev_flow->handle->dvh.rix_encap_decap = cache_resource->idx;
 		dev_flow->dv.encap_decap = cache_resource;
 		return 0;
@@ -2896,8 +2900,7 @@ flow_dv_encap_decap_resource_register
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
 	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
 	if (mlx5_hlist_insert_ex(sh->encaps_decaps, &cache_resource->entry,
				 flow_dv_encap_decap_resource_match,
				 (void *)cache_resource)) {
@@ -2912,7 +2915,7 @@ flow_dv_encap_decap_resource_register
 	dev_flow->dv.encap_decap = cache_resource;
 	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
 	return 0;
 }

@@ -2943,7 +2946,7 @@ flow_dv_jump_tbl_resource_register
 	int cnt, ret;

 	MLX5_ASSERT(tbl);
-	cnt = rte_atomic32_read(&tbl_data->jump.refcnt);
+	cnt = __atomic_load_n(&tbl_data->jump.refcnt, __ATOMIC_ACQUIRE);
 	if (!cnt) {
 		ret = mlx5_flow_os_create_flow_action_dest_flow_tbl
				(tbl->obj, &tbl_data->jump.action);
@@ -2960,7 +2963,7 @@ flow_dv_jump_tbl_resource_register
 		DRV_LOG(DEBUG, "existed jump table resource %p: refcnt %d++",
 			(void *)&tbl_data->jump, cnt);
 	}
-	rte_atomic32_inc(&tbl_data->jump.refcnt);
+	__atomic_fetch_add(&tbl_data->jump.refcnt, 1, __ATOMIC_RELEASE);
 	dev_flow->handle->rix_jump = tbl_data->idx;
 	dev_flow->dv.jump = &tbl_data->jump;
 	return 0;
@@ -2985,7 +2988,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
 	struct mlx5_dev_ctx_shared *sh = priv->sh;
 	struct mlx5_flow_default_miss_resource *cache_resource =
			&sh->default_miss;
-	int cnt = rte_atomic32_read(&cache_resource->refcnt);
+	int cnt = __atomic_load_n(&cache_resource->refcnt, __ATOMIC_ACQUIRE);

 	if (!cnt) {
 		MLX5_ASSERT(cache_resource->action);
@@ -2998,7 +3001,7 @@ flow_dv_default_miss_resource_register(struct rte_eth_dev *dev,
 		DRV_LOG(DEBUG, "new default miss resource %p: refcnt %d++",
 			(void *)cache_resource->action, cnt);
 	}
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_fetch_add(&cache_resource->refcnt, 1, __ATOMIC_RELEASE);
 	return 0;
 }
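The hunks above migrate reference counters from DPDK's legacy rte_atomic32_* API to the compiler's __atomic builtins. Relaxed ordering is used where the counter only tracks lifetime under the caller's synchronization; the jump-table and default-miss counters instead pair an acquire load (observe a fully created action before reusing it) with a release increment (publish the action after creating it). A minimal standalone sketch of the idiom, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    struct resource {
    	uint32_t refcnt;
    };

    static void
    resource_get(struct resource *r)
    {
    	/* Relaxed: lifetime tracking only, visibility ordered elsewhere. */
    	__atomic_fetch_add(&r->refcnt, 1, __ATOMIC_RELAXED);
    }

    static int
    resource_put(struct resource *r)
    {
    	/* Non-zero when the caller dropped the last reference. */
    	return __atomic_sub_fetch(&r->refcnt, 1, __ATOMIC_RELAXED) == 0;
    }

    int
    main(void)
    {
    	struct resource r;

    	/* One store replaces the old rte_atomic32_init() + _inc() pair. */
    	__atomic_store_n(&r.refcnt, 1, __ATOMIC_RELAXED);
    	resource_get(&r);
    	printf("refcnt=%u\n", __atomic_load_n(&r.refcnt, __ATOMIC_RELAXED));
    	resource_put(&r);
    	if (resource_put(&r))
    		printf("last reference dropped\n");
    	return 0;
    }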
@@ -3037,8 +3040,10 @@ flow_dv_port_id_action_resource_register
 			DRV_LOG(DEBUG, "port id action resource resource %p: "
 				"refcnt %d++",
 				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
 			dev_flow->handle->rix_port_id_action = idx;
 			dev_flow->dv.port_id_action = cache_resource;
 			return 0;
@@ -3061,15 +3066,14 @@ flow_dv_port_id_action_resource_register
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
 	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
 	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PORT_ID], &sh->port_id_action_list,
		     dev_flow->handle->rix_port_id_action, cache_resource,
		     next);
 	dev_flow->dv.port_id_action = cache_resource;
 	DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
 	return 0;
 }

@@ -3110,8 +3114,10 @@ flow_dv_push_vlan_action_resource_register
 			DRV_LOG(DEBUG, "push-VLAN action resource resource %p: "
 				"refcnt %d++",
 				(void *)cache_resource,
-				rte_atomic32_read(&cache_resource->refcnt));
-			rte_atomic32_inc(&cache_resource->refcnt);
+				__atomic_load_n(&cache_resource->refcnt,
+						__ATOMIC_RELAXED));
+			__atomic_fetch_add(&cache_resource->refcnt, 1,
+					   __ATOMIC_RELAXED);
 			dev_flow->handle->dvh.rix_push_vlan = idx;
 			dev_flow->dv.push_vlan_res = cache_resource;
 			return 0;
@@ -3140,8 +3146,7 @@ flow_dv_push_vlan_action_resource_register
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
 	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
 	ILIST_INSERT(sh->ipool[MLX5_IPOOL_PUSH_VLAN],
		     &sh->push_vlan_action_list,
		     dev_flow->handle->dvh.rix_push_vlan,
@@ -3149,7 +3154,7 @@ flow_dv_push_vlan_action_resource_register
 	dev_flow->dv.push_vlan_res = cache_resource;
 	DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
 	return 0;
 }
 /**
@@ -4549,8 +4554,10 @@ flow_dv_modify_hdr_resource_register
 				entry);
 		DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
 			(void *)cache_resource,
-			rte_atomic32_read(&cache_resource->refcnt));
-		rte_atomic32_inc(&cache_resource->refcnt);
+			__atomic_load_n(&cache_resource->refcnt,
+					__ATOMIC_RELAXED));
+		__atomic_fetch_add(&cache_resource->refcnt, 1,
+				   __ATOMIC_RELAXED);
 		dev_flow->handle->dvh.modify_hdr = cache_resource;
 		return 0;

@@ -4574,8 +4581,7 @@ flow_dv_modify_hdr_resource_register
 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
 					  NULL, "cannot create action");
 	}
-	rte_atomic32_init(&cache_resource->refcnt);
-	rte_atomic32_inc(&cache_resource->refcnt);
+	__atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED);
 	if (mlx5_hlist_insert_ex(sh->modify_cmds, &cache_resource->entry,
				 flow_dv_modify_hdr_resource_match,
				 (void *)cache_resource)) {
@@ -4589,7 +4595,7 @@ flow_dv_modify_hdr_resource_register
 	dev_flow->handle->dvh.modify_hdr = cache_resource;
 	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
 	return 0;
 }
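All of the *_resource_register() hunks above share one shape: look the resource up in a cache, bump the refcount on a hit, otherwise create it with the count preset to 1 before it is published. A compressed sketch of that shape, with a hypothetical one-slot cache standing in for the driver's mlx5_hlist/ILIST containers:

    #include <stdint.h>
    #include <stdlib.h>

    struct entry { uint32_t refcnt; int action; };

    static struct entry *slot;	/* hypothetical one-slot cache */

    static struct entry *
    register_or_reuse(void)
    {
    	struct entry *e = slot;

    	if (e != NULL) {
    		/* Hit: take one more reference on the cached resource. */
    		__atomic_fetch_add(&e->refcnt, 1, __ATOMIC_RELAXED);
    		return e;
    	}
    	e = calloc(1, sizeof(*e));
    	if (e == NULL)
    		return NULL;
    	e->action = 1;	/* stands in for mlx5_flow_os_create_*() */
    	/* Preset to 1 before publishing: one store replaces init + inc. */
    	__atomic_store_n(&e->refcnt, 1, __ATOMIC_RELAXED);
    	slot = e;
    	return e;
    }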
@@ -5310,6 +5316,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 		.transfer = !!attr->transfer,
 		.fdb_def_rule = !!priv->fdb_def_rule,
 	};
+	const struct rte_eth_hairpin_conf *conf;

 	if (items == NULL)
 		return -1;
@@ -5358,7 +5365,7 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 			break;
 		case RTE_FLOW_ITEM_TYPE_ETH:
 			ret = mlx5_flow_validate_item_eth(items, item_flags,
-							  error);
+							  true, error);
 			if (ret < 0)
 				return ret;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
@@ -6155,11 +6162,18 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 					  actions,
 					  "no fate action is found");
 	}
-	/* Continue validation for Xcap and VLAN actions.*/
+	/*
+	 * Continue validation for Xcap and VLAN actions.
+	 * If hairpin is working in explicit TX rule mode, there is no actions
+	 * splitting and the validation of hairpin ingress flow should be the
+	 * same as other standard flows.
+	 */
 	if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
			     MLX5_FLOW_VLAN_ACTIONS)) &&
 	    (queue_index == 0xFFFF ||
-	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+	     mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN ||
+	     ((conf = mlx5_rxq_get_hairpin_conf(dev, queue_index)) != NULL &&
+	      conf->tx_explicit != 0))) {
 		if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
 		    MLX5_FLOW_XCAP_ACTIONS)
 			return rte_flow_error_set(error, ENOTSUP,
@@ -6188,7 +6202,10 @@ flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
 						  "multiple VLAN actions");
 		}
 	}
-	/* Hairpin flow will add one more TAG action. */
+	/*
+	 * Hairpin flow will add one more TAG action in TX implicit mode.
+	 * In TX explicit mode, there will be no hairpin flow ID.
+	 */
 	if (hairpin > 0)
 		rw_act_num += MLX5_ACT_NUM_SET_TAG;
 	/* extra metadata enabled: one more TAG action will be add. */
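The new validation clause reads: the Xcap/VLAN restrictions now also apply to a hairpin queue whose configuration has tx_explicit set, because no action splitting happens in that mode. Restated as a predicate (a sketch, not the driver's code; only the tx_explicit bit is taken from the diff, and the struct here is a stand-in subset of rte_eth_hairpin_conf):

    #include <stdbool.h>
    #include <stddef.h>

    struct hairpin_conf { unsigned int tx_explicit:1; };

    /*
     * True when the PMD splits actions itself (implicit TX rule mode),
     * i.e. when the hairpin ingress flow is exempt from the standard
     * Xcap/VLAN checks.
     */
    static bool
    hairpin_implicit_mode(bool is_hairpin_queue, const struct hairpin_conf *conf)
    {
    	if (!is_hairpin_queue)
    		return false;
    	return conf == NULL || conf->tx_explicit == 0;
    }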
@@ -6360,9 +6377,10 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 		.type = RTE_BE16(0xffff),
+		.has_vlan = 0,
 	};
-	void *headers_m;
-	void *headers_v;
+	void *hdrs_m;
+	void *hdrs_v;
 	char *l24_v;
 	unsigned int i;

@@ -6371,38 +6389,26 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 	if (!eth_m)
 		eth_m = &nic_mask;
 	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
 	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
-	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, dmac_47_16),
	       &eth_m->dst, sizeof(eth_m->dst));
 	/* The value must be in the range of the mask. */
-	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, dmac_47_16);
 	for (i = 0; i < sizeof(eth_m->dst); ++i)
 		l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
-	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+	memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_m, smac_47_16),
	       &eth_m->src, sizeof(eth_m->src));
-	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, smac_47_16);
 	/* The value must be in the range of the mask. */
 	for (i = 0; i < sizeof(eth_m->dst); ++i)
 		l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
-	if (eth_v->type) {
-		/* When ethertype is present set mask for tagged VLAN. */
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
-		/* Set value for tagged VLAN if ethertype is 802.1Q. */
-		if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
-		    eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
-			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
-				 1);
-			/* Return here to avoid setting match on ethertype. */
-			return;
-		}
-	}
 	/*
 	 * HW supports match on one Ethertype, the Ethertype following the last
 	 * VLAN tag of the packet (see PRM).
@@ -6411,19 +6417,42 @@ flow_dv_translate_item_eth(void *matcher, void *key,
 	 * ethertype, and use ip_version field instead.
 	 * eCPRI over Ether layer will use type value 0xAEFE.
 	 */
-	if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
-	    eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	} else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
-		   eth_m->type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	} else {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
-			 rte_be_to_cpu_16(eth_m->type));
-		l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-				     ethertype);
-		*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+	if (eth_m->type == 0xFFFF) {
+		/* Set cvlan_tag mask for any single\multi\un-tagged case. */
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		switch (eth_v->type) {
+		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_QINQ):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+			return;
+		default:
+			break;
+		}
 	}
+	if (eth_m->has_vlan) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		if (eth_v->has_vlan) {
+			/*
+			 * Here, when also has_more_vlan field in VLAN item is
+			 * not set, only single-tagged packets will be matched.
+			 */
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+			return;
+		}
+	}
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+		 rte_be_to_cpu_16(eth_m->type));
+	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, hdrs_v, ethertype);
+	*(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
 }

 /**
@@ -6448,19 +6477,19 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
 {
 	const struct rte_flow_item_vlan *vlan_m = item->mask;
 	const struct rte_flow_item_vlan *vlan_v = item->spec;
-	void *headers_m;
-	void *headers_v;
+	void *hdrs_m;
+	void *hdrs_v;
 	uint16_t tci_m;
 	uint16_t tci_v;

 	if (inner) {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
 	} else {
-		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+		hdrs_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
-		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		hdrs_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 		/*
 		 * This is workaround, masks are not supported,
 		 * and pre-validated.
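The rewritten flow_dv_translate_item_eth() dispatches a fully-masked ethertype instead of special-casing it inline: 0x8100 selects the customer VLAN tag bit, 0x88A8 the service tag bit, IPv4/IPv6 the ip_version field, and anything else (eCPRI's 0xAEFE, for example) falls through to a raw ethertype match. A standalone sketch of that decision table, reduced to plain values:

    #include <stdint.h>
    #include <stdio.h>

    static const char *
    eth_type_to_match(uint16_t type_mask, uint16_t type_val)
    {
    	if (type_mask == 0xFFFF) {
    		switch (type_val) {
    		case 0x8100: return "cvlan_tag";	/* 802.1Q  */
    		case 0x88A8: return "svlan_tag";	/* 802.1ad */
    		case 0x0800: return "ip_version=4";
    		case 0x86DD: return "ip_version=6";
    		default: break;
    		}
    	}
    	return "raw ethertype";		/* value & mask */
    }

    int
    main(void)
    {
    	printf("%s\n", eth_type_to_match(0xFFFF, 0x88A8));	/* svlan_tag */
    	printf("%s\n", eth_type_to_match(0xFFFF, 0xAEFE));	/* raw ethertype */
    	return 0;
    }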
@@ -6473,37 +6502,54 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
 	 * When VLAN item exists in flow, mark packet as tagged,
 	 * even if TCI is not specified.
 	 */
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+	if (!MLX5_GET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag)) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, cvlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 1);
+	}
 	if (!vlan_v)
 		return;
 	if (!vlan_m)
 		vlan_m = &rte_flow_item_vlan_mask;
 	tci_m = rte_be_to_cpu_16(vlan_m->tci);
 	tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
-	MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_vid, tci_m);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_vid, tci_v);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_cfi, tci_m >> 12);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_cfi, tci_v >> 12);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, first_prio, tci_m >> 13);
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, first_prio, tci_v >> 13);
 	/*
 	 * HW is optimized for IPv4/IPv6. In such cases, avoid setting
 	 * ethertype, and use ip_version field instead.
 	 */
-	if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
-	    vlan_m->inner_type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	} else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
-		   vlan_m->inner_type == 0xFFFF) {
-		flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	} else {
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
-			 rte_be_to_cpu_16(vlan_m->inner_type));
-		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-			 rte_be_to_cpu_16(vlan_m->inner_type &
-					  vlan_v->inner_type));
+	if (vlan_m->inner_type == 0xFFFF) {
+		switch (vlan_v->inner_type) {
+		case RTE_BE16(RTE_ETHER_TYPE_VLAN):
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+			MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV4):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 4);
+			return;
+		case RTE_BE16(RTE_ETHER_TYPE_IPV6):
+			flow_dv_set_match_ip_version(group, hdrs_v, hdrs_m, 6);
+			return;
+		default:
+			break;
+		}
+	}
+	if (vlan_m->has_more_vlan && vlan_v->has_more_vlan) {
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, svlan_tag, 1);
+		/* Only one vlan_tag bit can be set. */
+		MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, cvlan_tag, 0);
+		return;
 	}
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_m, ethertype,
+		 rte_be_to_cpu_16(vlan_m->inner_type));
+	MLX5_SET(fte_match_set_lyr_2_4, hdrs_v, ethertype,
+		 rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
 }
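The first_vid/first_cfi/first_prio writes above decompose the 16-bit TCI; the matcher fields are width-limited (MLX5_SET truncates to field width), so the shifts alone select the DEI and PCP bits. Worked standalone:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
    	uint16_t tci = 0xA123;	/* PCP=5, DEI=0, VID=0x123 */

    	printf("vid = 0x%03x\n", tci & 0x0FFF);	/* first_vid  (12 bits) */
    	printf("dei = %u\n", (tci >> 12) & 1);	/* first_cfi  (1 bit)   */
    	printf("pcp = %u\n", tci >> 13);	/* first_prio (3 bits)  */
    	return 0;
    }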
 /**
@@ -6515,8 +6561,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] item_flags
- *   Bit-fields that holds the items detected until now.
  * @param[in] inner
  *   Item is inner pattern.
  * @param[in] group
@@ -6525,7 +6569,6 @@ flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
 static void
 flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
-			    const uint64_t item_flags,
			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
@@ -6555,13 +6598,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
-	/*
-	 * On outer header (which must contains L2), or inner header with L2,
-	 * set cvlan_tag mask bit to mark this packet as untagged.
-	 * This should be done even if item->spec is empty.
-	 */
-	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
 	if (!ipv4_v)
 		return;
 	if (!ipv4_m)
@@ -6608,8 +6644,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
  *   Flow matcher value.
  * @param[in] item
  *   Flow pattern to translate.
- * @param[in] item_flags
- *   Bit-fields that holds the items detected until now.
  * @param[in] inner
  *   Item is inner pattern.
  * @param[in] group
@@ -6618,7 +6652,6 @@ flow_dv_translate_item_ipv4(void *matcher, void *key,
 static void
 flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
-			    const uint64_t item_flags,
			    int inner, uint32_t group)
 {
 	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
@@ -6657,13 +6690,6 @@ flow_dv_translate_item_ipv6(void *matcher, void *key,
 		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
 	}
 	flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
-	/*
-	 * On outer header (which must contains L2), or inner header with L2,
-	 * set cvlan_tag mask bit to mark this packet as untagged.
-	 * This should be done even if item->spec is empty.
-	 */
-	if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
-		MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
 	if (!ipv6_v)
 		return;
 	if (!ipv6_m)
@@ -8000,7 +8026,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 		tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
					entry);
 		tbl = &tbl_data->tbl;
-		rte_atomic32_inc(&tbl->refcnt);
+		__atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
 		return tbl;
 	}
 	tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
@@ -8035,9 +8061,9 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 	 * No multi-threads now, but still better to initialize the reference
 	 * count before insert it into the hash list.
 	 */
-	rte_atomic32_init(&tbl->refcnt);
+	__atomic_store_n(&tbl->refcnt, 0, __ATOMIC_RELAXED);
 	/* Jump action reference count is initialized here. */
-	rte_atomic32_init(&tbl_data->jump.refcnt);
+	__atomic_store_n(&tbl_data->jump.refcnt, 0, __ATOMIC_RELAXED);
 	pos->key = table_key.v64;
 	ret = mlx5_hlist_insert(sh->flow_tbls, pos);
 	if (ret < 0) {
@@ -8047,7 +8073,7 @@ flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
 		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
 		mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
 	}
-	rte_atomic32_inc(&tbl->refcnt);
+	__atomic_fetch_add(&tbl->refcnt, 1, __ATOMIC_RELAXED);
 	return tbl;
 }

@@ -8073,7 +8099,7 @@ flow_dv_tbl_resource_release(struct rte_eth_dev *dev,

 	if (!tbl)
 		return 0;
-	if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+	if (__atomic_sub_fetch(&tbl->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 		struct mlx5_hlist_entry *pos = &tbl_data->entry;

 		mlx5_flow_os_destroy_flow_tbl(tbl->obj);
"tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); - rte_atomic32_inc(&cache_matcher->refcnt); + __atomic_load_n(&cache_matcher->refcnt, + __ATOMIC_RELAXED)); + __atomic_fetch_add(&cache_matcher->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->dvh.matcher = cache_matcher; /* old matcher should not make the table ref++. */ flow_dv_tbl_resource_release(dev, tbl); @@ -8212,16 +8240,15 @@ flow_dv_matcher_register(struct rte_eth_dev *dev, } /* Save the table information */ cache_matcher->tbl = tbl; - rte_atomic32_init(&cache_matcher->refcnt); /* only matcher ref++, table ref++ already done above in get API. */ - rte_atomic32_inc(&cache_matcher->refcnt); + __atomic_store_n(&cache_matcher->refcnt, 1, __ATOMIC_RELAXED); LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next); dev_flow->handle->dvh.matcher = cache_matcher; DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d", key->domain ? "FDB" : "NIC", key->table_id, cache_matcher->priority, key->direction ? "tx" : "rx", (void *)cache_matcher, - rte_atomic32_read(&cache_matcher->refcnt)); + __atomic_load_n(&cache_matcher->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -8258,12 +8285,14 @@ flow_dv_tag_resource_register if (entry) { cache_resource = container_of (entry, struct mlx5_flow_dv_tag_resource, entry); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_fetch_add(&cache_resource->refcnt, 1, + __ATOMIC_RELAXED); dev_flow->handle->dvh.rix_tag = cache_resource->idx; dev_flow->dv.tag_resource = cache_resource; DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, + __ATOMIC_RELAXED)); return 0; } /* Register new resource. */ @@ -8282,8 +8311,7 @@ flow_dv_tag_resource_register RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, "cannot create action"); } - rte_atomic32_init(&cache_resource->refcnt); - rte_atomic32_inc(&cache_resource->refcnt); + __atomic_store_n(&cache_resource->refcnt, 1, __ATOMIC_RELAXED); if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) { mlx5_flow_os_destroy_flow_action(cache_resource->action); mlx5_free(cache_resource); @@ -8294,7 +8322,7 @@ flow_dv_tag_resource_register dev_flow->dv.tag_resource = cache_resource; DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++", (void *)cache_resource, - rte_atomic32_read(&cache_resource->refcnt)); + __atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED)); return 0; } @@ -8322,8 +8350,8 @@ flow_dv_tag_release(struct rte_eth_dev *dev, return 0; DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--", dev->data->port_id, (void *)tag, - rte_atomic32_read(&tag->refcnt)); - if (rte_atomic32_dec_and_test(&tag->refcnt)) { + __atomic_load_n(&tag->refcnt, __ATOMIC_RELAXED)); + if (__atomic_sub_fetch(&tag->refcnt, 1, __ATOMIC_RELAXED) == 0) { claim_zero(mlx5_flow_os_destroy_flow_action(tag->action)); mlx5_hlist_remove(sh->tag_table, &tag->entry); DRV_LOG(DEBUG, "port %u tag %p: removed", @@ -9915,7 +9943,7 @@ __flow_dv_translate(struct rte_eth_dev *dev, mlx5_flow_tunnel_ip_check(items, next_protocol, &item_flags, &tunnel); flow_dv_translate_item_ipv4(match_mask, match_value, - items, item_flags, tunnel, + items, tunnel, dev_flow->dv.group); matcher.priority = MLX5_PRIORITY_MAP_L3; last_item = tunnel ? 
@@ -9915,7 +9943,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv4(match_mask, match_value,
-						    items, item_flags, tunnel,
+						    items, tunnel,
						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
@@ -9938,7 +9966,7 @@ __flow_dv_translate(struct rte_eth_dev *dev,
 			mlx5_flow_tunnel_ip_check(items, next_protocol,
						  &item_flags, &tunnel);
 			flow_dv_translate_item_ipv6(match_mask, match_value,
-						    items, item_flags, tunnel,
+						    items, tunnel,
						    dev_flow->dv.group);
 			matcher.priority = MLX5_PRIORITY_MAP_L3;
 			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
@@ -10262,7 +10290,8 @@ __flow_dv_rss_get_hrxq(struct rte_eth_dev *dev, struct rte_flow *flow,
 		if (hrxq_idx) {
 			*hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
					       hrxq_idx);
-			rte_atomic32_inc(&(*hrxq)->refcnt);
+			__atomic_fetch_add(&(*hrxq)->refcnt, 1,
+					   __ATOMIC_RELAXED);
 		}
 	} else {
 		struct mlx5_flow_rss_desc *rss_desc =
@@ -10441,8 +10470,8 @@ flow_dv_matcher_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(matcher->matcher_object);
 	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
 		dev->data->port_id, (void *)matcher,
-		rte_atomic32_read(&matcher->refcnt));
-	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
+		__atomic_load_n(&matcher->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&matcher->refcnt, 1, __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_matcher
			   (matcher->matcher_object));
 		LIST_REMOVE(matcher, next);
@@ -10482,8 +10511,9 @@ flow_dv_encap_decap_resource_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
				(cache_resource->action));
 		mlx5_hlist_remove(priv->sh->encaps_decaps,
@@ -10523,8 +10553,9 @@ flow_dv_jump_tbl_resource_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
				(cache_resource->action));
 		/* jump action memory free is inside the table release. */
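All of the *_release() hunks above and below replace rte_atomic32_dec_and_test() with __atomic_sub_fetch(..., 1, ...) == 0: the builtin returns the post-decrement value, so exactly one caller observes zero and performs the destroy. A standalone sketch, not driver code:

    #include <stdint.h>
    #include <stdio.h>

    struct cache_resource { uint32_t refcnt; int action; };

    static int
    resource_release(struct cache_resource *r)
    {
    	if (__atomic_sub_fetch(&r->refcnt, 1, __ATOMIC_RELAXED) == 0) {
    		r->action = 0;	/* destroy point: exactly one winner */
    		return 0;	/* released */
    	}
    	return 1;		/* still referenced */
    }

    int
    main(void)
    {
    	struct cache_resource r = { .refcnt = 2, .action = 1 };

    	printf("%d\n", resource_release(&r));	/* 1: still referenced */
    	printf("%d\n", resource_release(&r));	/* 0: destroyed */
    	return 0;
    }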
@@ -10555,8 +10586,10 @@ flow_dv_default_miss_resource_release(struct rte_eth_dev *dev)
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "default miss resource %p: refcnt %d--",
 		(void *)cache_resource->action,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt,
+				__ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_glue->destroy_flow_action
				(cache_resource->action));
 		DRV_LOG(DEBUG, "default miss resource %p: removed",
@@ -10588,8 +10621,9 @@ flow_dv_modify_hdr_resource_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
				(cache_resource->action));
 		mlx5_hlist_remove(priv->sh->modify_cmds,
@@ -10628,8 +10662,9 @@ flow_dv_port_id_action_resource_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
				(cache_resource->action));
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PORT_ID],
@@ -10669,8 +10704,9 @@ flow_dv_push_vlan_action_resource_release(struct rte_eth_dev *dev,
 	MLX5_ASSERT(cache_resource->action);
 	DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
 		(void *)cache_resource,
-		rte_atomic32_read(&cache_resource->refcnt));
-	if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
+		__atomic_load_n(&cache_resource->refcnt, __ATOMIC_RELAXED));
+	if (__atomic_sub_fetch(&cache_resource->refcnt, 1,
+			       __ATOMIC_RELAXED) == 0) {
 		claim_zero(mlx5_flow_os_destroy_flow_action
				(cache_resource->action));
 		ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_PUSH_VLAN],
@@ -12289,6 +12325,31 @@ flow_dv_action_update(struct rte_eth_dev *dev,
 	return ret;
 }

+static int
+flow_dv_sync_domain(struct rte_eth_dev *dev, uint32_t domains, uint32_t flags)
+{
+	struct mlx5_priv *priv = dev->data->dev_private;
+	int ret = 0;
+
+	if ((domains & MLX5_DOMAIN_BIT_NIC_RX) && priv->sh->rx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->rx_domain,
+						flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_NIC_TX) && priv->sh->tx_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->tx_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	if ((domains & MLX5_DOMAIN_BIT_FDB) && priv->sh->fdb_domain != NULL) {
+		ret = mlx5_glue->dr_sync_domain(priv->sh->fdb_domain, flags);
+		if (ret != 0)
+			return ret;
+	}
+	return 0;
+}
+
 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.validate = flow_dv_validate,
 	.prepare = flow_dv_prepare,
@@ -12309,6 +12370,7 @@ const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
 	.action_create = flow_dv_action_create,
 	.action_destroy = flow_dv_action_destroy,
 	.action_update = flow_dv_action_update,
+	.sync_domain = flow_dv_sync_domain,
 };

 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */
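The new flow_dv_sync_domain() callback walks the requested steering domains (NIC RX, NIC TX, FDB) and calls rdma-core's dr_sync_domain on each one that exists, stopping at the first failure. It backs the experimental rte_pmd_mlx5_sync_flow() entry point from the newly included rte_pmd_mlx5.h; the call below assumes that API's 20.11 shape (port_id plus a domain bitmap) — a usage sketch, not part of the patch:

    #include <stdint.h>
    #include <rte_pmd_mlx5.h>

    /* Flush all queued steering-rule writes on a port down to the NIC. */
    static int
    flush_rules_to_hw(uint16_t port_id)
    {
    	uint32_t domains = MLX5_DOMAIN_BIT_NIC_RX |
    			   MLX5_DOMAIN_BIT_NIC_TX |
    			   MLX5_DOMAIN_BIT_FDB;

    	return rte_pmd_mlx5_sync_flow(port_id, domains);
    }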