+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ ret = flow_dv_validate_item_port_id
+ (dev, items, attr, item_flags, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_PORT_ID;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ ret = mlx5_flow_validate_item_eth(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
+ MLX5_FLOW_LAYER_OUTER_L2;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_eth *)
+ items->spec)->type;
+ ether_type &=
+ ((const struct rte_flow_item_eth *)
+ items->mask)->type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = flow_dv_validate_item_vlan(items, item_flags,
+ dev, error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
+ /* Store outer VLAN mask for of_push_vlan action. */
+ if (!tunnel)
+ vlan_m = items->mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ipv4_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ipv6_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ item_ipv6_proto =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &nic_tcp_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ gre_item = items;
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_NVGRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ ret = mlx5_flow_validate_item_gre_key
+ (items, item_flags, gre_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GRE_KEY;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ ret = mlx5_flow_validate_item_geneve(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(dev, items,
+ item_flags,
+ last_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_MPLS;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MARK:
+ ret = flow_dv_validate_item_mark(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_MARK;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ ret = flow_dv_validate_item_meta(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_METADATA;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ ret = mlx5_flow_validate_item_icmp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ICMP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP6:
+ ret = mlx5_flow_validate_item_icmp6(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_ipv6_proto = IPPROTO_ICMPV6;
+ last_item = MLX5_FLOW_LAYER_ICMP6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TAG:
+ ret = flow_dv_validate_item_tag(dev, items,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_TAG;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ ret = flow_dv_validate_item_gtp(dev, items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GTP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ /* Capacity will be checked in the translate stage. */
+ ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ecpri_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
+ item_flags |= last_item;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "too many actions");
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ ret = flow_dv_validate_action_port_id(dev,
+ action_flags,
+ actions,
+ attr,
+ error);
+ if (ret)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = flow_dv_validate_action_flag(dev, action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
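+				/*
+				 * The device applies all header modifications
+				 * of a flow through a single modify-header
+				 * object, so together they occupy only one
+				 * slot in the action list.
+				 */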
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_FLAG |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ ++actions_n;
+ }
+ rw_act_num += MLX5_ACT_NUM_SET_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ }
+ rw_act_num += MLX5_ACT_NUM_SET_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ ret = flow_dv_validate_action_set_meta(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ rw_act_num += MLX5_ACT_NUM_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ ret = flow_dv_validate_action_set_tag(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (actions->conf))->index;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ attr, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (rss != NULL && rss->queue_num)
+ queue_index = rss->queue[0];
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ if (flow_dv_validate_action_pop_vlan(dev,
+ action_flags,
+ actions,
+ item_flags, attr,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ ret = flow_dv_validate_action_push_vlan(dev,
+ action_flags,
+ vlan_m,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ ret = flow_dv_validate_action_set_vlan_pcp
+ (action_flags, actions, error);
+ if (ret < 0)
+ return ret;
+ /* Count PCP with push_vlan command. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ ret = flow_dv_validate_action_set_vlan_vid
+ (item_flags, action_flags,
+ actions, error);
+ if (ret < 0)
+ return ret;
+ /* Count VID with push_vlan command. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ rw_act_num += MLX5_ACT_NUM_MDF_VID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = flow_dv_validate_action_l2_encap(dev,
+ action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ ret = flow_dv_validate_action_decap(dev, action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, actions->conf, attr, &action_flags,
+ &actions_n, error);
+ if (ret < 0)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap = actions->conf;
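+			/*
+			 * Peek past any VOID actions: RAW_DECAP immediately
+			 * followed by RAW_ENCAP is validated as a single
+			 * decap-and-encap (header rewrite) pair.
+			 */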
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ encap = NULL;
+ actions--;
+ } else {
+ encap = actions->conf;
+ }
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev,
+ decap ? decap : &empty_decap, encap,
+ attr, &action_flags, &actions_n,
+ error);
+ if (ret < 0)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ ret = flow_dv_validate_action_modify_mac(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
+			/*
+			 * Even if the source and destination MAC addresses
+			 * overlap within the 4B-aligned header layout, the
+			 * conversion function handles them separately, so 4
+			 * SW actions are created, and 2 actions are added
+			 * per call regardless of how many address bytes are
+			 * set.
+			 */
+ rw_act_num += MLX5_ACT_NUM_MDF_MAC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ ret = flow_dv_validate_action_modify_ipv4(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ ret = flow_dv_validate_action_modify_ipv6(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (item_ipv6_proto == IPPROTO_ICMPV6)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Can't change header "
+ "with ICMPv6 proto");
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ ret = flow_dv_validate_action_modify_tp(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_PORT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ ret = flow_dv_validate_action_modify_ttl(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TTL ?
+ MLX5_FLOW_ACTION_SET_TTL :
+ MLX5_FLOW_ACTION_DEC_TTL;
+ rw_act_num += MLX5_ACT_NUM_MDF_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = flow_dv_validate_action_jump(actions,
+ action_flags,
+ attr, external,
+ error);
+ if (ret)
+ return ret;
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ ret = flow_dv_validate_action_modify_tcp_seq
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+ MLX5_FLOW_ACTION_INC_TCP_SEQ :
+ MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+ rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ ret = flow_dv_validate_action_modify_tcp_ack
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+ MLX5_FLOW_ACTION_INC_TCP_ACK :
+ MLX5_FLOW_ACTION_DEC_TCP_ACK;
+ rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ ret = mlx5_flow_validate_action_meter(dev,
+ action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_METER;
+ ++actions_n;
+ /* Meter action will add one more TAG action. */
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_validate_action_age(action_flags,
+ actions, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ ret = flow_dv_validate_action_modify_ipv4_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+ rw_act_num += MLX5_ACT_NUM_SET_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ ret = flow_dv_validate_action_modify_ipv6_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ rw_act_num += MLX5_ACT_NUM_SET_DSCP;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+ /*
+ * Validate the drop action mutual exclusion with other actions.
+ * Drop action is mutually-exclusive with any other action, except for
+ * Count action.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Drop action is mutually-exclusive "
+ "with any other action, except for "
+ "Count action");
+	/* E-Switch has a few restrictions on using items and actions. */
+ if (attr->transfer) {
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action FLAG");
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action MARK");
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (action_flags & MLX5_FLOW_ACTION_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action RSS");
+ if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no fate action is found");
+ } else {
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no fate action is found");
+ }
+	/* Continue validation for Xcap and VLAN actions. */
+ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
+ MLX5_FLOW_VLAN_ACTIONS)) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't supported");
+ if (!attr->transfer && attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
+ }
+ /* Hairpin flow will add one more TAG action. */
+ if (hairpin > 0)
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+	/* Extra metadata enabled: one more TAG action will be added. */
+ if (dev_conf->dv_flow_en &&
+ dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev))
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ if ((uint32_t)rw_act_num >
+ flow_dv_modify_hdr_action_max(dev, is_root)) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "too many header modify"
+ " actions to support");
+ }
+ return 0;
+}
+
+/**
+ * Internal preparation function. Allocates the DV flow size;
+ * this size is constant.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success,
+ * otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_dv_prepare(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr __rte_unused,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ uint32_t handle_idx = 0;
+ struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+	/* Guard against overflowing the temporary device flow array. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
+ if (!dev_handle) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow handle");
+ return NULL;
+ }
+	/* Multi-threading is not supported. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
+	/*
+	 * Some old rdma-core releases check the length of the matching
+	 * parameter before continuing. That check must use the length
+	 * without the misc4 param; if the flow needs misc4 support the
+	 * length is adjusted accordingly. Each param member is naturally
+	 * aligned to a 64B boundary.
+	 */
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
+	/*
+	 * The matching value must be cleared to 0 before use. In the
+	 * past it was cleared implicitly by the rte_*alloc API; the
+	 * explicit memset takes almost the same time.
+	 */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ dev_flow->ingress = attr->ingress;
+ dev_flow->dv.transfer = attr->transfer;
+ return dev_flow;
+}
+
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+/**
+ * Sanity check for match mask and value. Similar to check_valid_spec() in
+ * the kernel driver. If an unmasked bit is set in the value, failure is
+ * returned.
+ *
+ * @param match_mask
+ *   Pointer to the match mask buffer.
+ * @param match_value
+ *   Pointer to the match value buffer.
+ *
+ * @return
+ * 0 if valid, -EINVAL otherwise.
+ */
+static int
+flow_dv_check_valid_spec(void *match_mask, void *match_value)
+{
+ uint8_t *m = match_mask;
+ uint8_t *v = match_value;
+ unsigned int i;
+
+ for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
+ if (v[i] & ~m[i]) {
+ DRV_LOG(ERR,
+ "match_value differs from match_criteria"
+ " %p[%u] != %p[%u]",
+ match_value, i, match_mask, i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif
+
+/**
+ * Add match of ip_version.
+ *
+ * @param[in] group
+ * Flow group.
+ * @param[in] headers_v
+ * Values header pointer.
+ * @param[in] headers_m
+ * Masks header pointer.
+ * @param[in] ip_version
+ * The IP version to set.
+ */
+static inline void
+flow_dv_set_match_ip_version(uint32_t group,
+ void *headers_v,
+ void *headers_m,
+ uint8_t ip_version)
+{
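+	/*
+	 * On group 0 (the root table) the ip_version field is matched
+	 * with the full 0xf mask; on non-root tables the version value
+	 * itself doubles as the mask.
+	 */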
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
+ ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
+}
+
+/**
+ * Add Ethernet item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_eth(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
+{
+ const struct rte_flow_item_eth *eth_m = item->mask;
+ const struct rte_flow_item_eth *eth_v = item->spec;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_v;
+ unsigned int i;
+
+ if (!eth_v)
+ return;
+ if (!eth_m)
+ eth_m = &nic_mask;
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+	       &eth_m->dst, sizeof(eth_m->dst));
+ /* The value must be in the range of the mask. */
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+ for (i = 0; i < sizeof(eth_m->dst); ++i)
+ l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+	       &eth_m->src, sizeof(eth_m->src));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+ /* The value must be in the range of the mask. */
+	for (i = 0; i < sizeof(eth_m->src); ++i)
+ l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ if (eth_v->type) {
+ /* When ethertype is present set mask for tagged VLAN. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /* Set value for tagged VLAN if ethertype is 802.1Q. */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
+ 1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
+ }
+ /*
+ * HW supports match on one Ethertype, the Ethertype following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ * eCPRI over Ether layer will use type value 0xAEFE.
+ */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ }
+}
+
+/**
+ * Add VLAN item to matcher and to the value.
+ *
+ * @param[in, out] dev_flow
+ * Flow descriptor.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ *   Item is inner pattern.
+ * @param[in] group
+ *   The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
+ void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_vlan *vlan_m = item->mask;
+ const struct rte_flow_item_vlan *vlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ uint16_t tci_m;
+ uint16_t tci_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		/*
+		 * This is a workaround; masks are not supported here
+		 * and were pre-validated.
+		 */
+ if (vlan_v)
+ dev_flow->handle->vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ }
+ /*
+ * When VLAN item exists in flow, mark packet as tagged,
+ * even if TCI is not specified.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &rte_flow_item_vlan_mask;
+ tci_m = rte_be_to_cpu_16(vlan_m->tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
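+	/*
+	 * TCI layout: bits 0-11 VID, bit 12 CFI/DEI, bits 13-15 PCP,
+	 * hence the shifts below.
+	 */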
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type &
+ vlan_v->inner_type));
+ }
+}
+
+/**
+ * Add IPV4 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] inner
+ * Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_ipv4(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const uint64_t item_flags,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
+ const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_m;
+ char *l24_v;
+ uint8_t tos;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ /*
+	 * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ if (!ipv4_v)
+ return;
+ if (!ipv4_m)
+ ipv4_m = &nic_mask;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
+ tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
+ ipv4_m->hdr.type_of_service);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
+ ipv4_m->hdr.type_of_service >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
+}
+
+/**
+ * Add IPV6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] inner
+ * Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_ipv6(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const uint64_t item_flags,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
+ const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ char *l24_m;
+ char *l24_v;
+ uint32_t vtc_m;
+ uint32_t vtc_v;
+ int i;
+ int size;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ /*
+	 * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ if (!ipv6_v)
+ return;
+ if (!ipv6_m)
+ ipv6_m = &nic_mask;
+ size = sizeof(ipv6_m->hdr.dst_addr);
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.src_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
+ /* TOS. */
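+	/*
+	 * vtc_flow layout: 4-bit version | 8-bit traffic class |
+	 * 20-bit flow label. MLX5_SET() truncates to the field width,
+	 * so >> 20 extracts ECN and >> 22 extracts DSCP.
+	 */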
+ vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
+ vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
+ /* Label. */
+ if (inner) {
+ MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
+ vtc_v);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
+ vtc_v);
+ }
+ /* Protocol. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv6_m->hdr.proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+ /* Hop limit. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+}
+
+/**
+ * Add TCP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_tcp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_tcp *tcp_m = item->mask;
+ const struct rte_flow_item_tcp *tcp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
+ if (!tcp_v)
+ return;
+ if (!tcp_m)
+ tcp_m = &rte_flow_item_tcp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
+ rte_be_to_cpu_16(tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+ rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
+ rte_be_to_cpu_16(tcp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+ rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
+ tcp_m->hdr.tcp_flags);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
+}
+
+/**
+ * Add UDP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_udp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_udp *udp_m = item->mask;
+ const struct rte_flow_item_udp *udp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ if (!udp_v)
+ return;
+ if (!udp_m)
+ udp_m = &rte_flow_item_udp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
+ rte_be_to_cpu_16(udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ rte_be_to_cpu_16(udp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
+}
+
+/**
+ * Add GRE optional Key item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_gre_key(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const rte_be32_t *key_m = item->mask;
+ const rte_be32_t *key_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
+
+	/* GRE K bit must be on and should already be validated. */
+ MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
+ if (!key_v)
+ return;
+ if (!key_m)
+ key_m = &gre_key_default_mask;
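+	/*
+	 * The PRM splits the 32-bit GRE key: gre_key_h carries the
+	 * upper 24 bits, gre_key_l the lower 8 bits.
+	 */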
+ MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
+ rte_be_to_cpu_32(*key_m) >> 8);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
+ rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
+ MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
+ rte_be_to_cpu_32(*key_m) & 0xFF);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
+ rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
+}
+
+/**
+ * Add GRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gre(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_gre *gre_m = item->mask;
+ const struct rte_flow_item_gre *gre_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
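+	/*
+	 * Bit-field view of the GRE c_rsvd0_ver word after byte
+	 * swapping, used to pick the C/S/K flags apart for the
+	 * separate PRM match fields.
+	 */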
+ struct {
+ union {
+ __extension__
+ struct {
+ uint16_t version:3;
+ uint16_t rsvd0:9;
+ uint16_t s_present:1;
+ uint16_t k_present:1;
+ uint16_t rsvd_bit1:1;
+ uint16_t c_present:1;
+ };
+ uint16_t value;
+ };
+ } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
+ if (!gre_v)
+ return;
+ if (!gre_m)
+ gre_m = &rte_flow_item_gre_mask;
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
+ rte_be_to_cpu_16(gre_m->protocol));
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
+ gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
+ gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
+ MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
+ gre_crks_rsvd0_ver_m.c_present);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
+ gre_crks_rsvd0_ver_v.c_present &
+ gre_crks_rsvd0_ver_m.c_present);
+ MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
+ gre_crks_rsvd0_ver_m.k_present);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
+ gre_crks_rsvd0_ver_v.k_present &
+ gre_crks_rsvd0_ver_m.k_present);
+ MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
+ gre_crks_rsvd0_ver_m.s_present);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
+ gre_crks_rsvd0_ver_v.s_present &
+ gre_crks_rsvd0_ver_m.s_present);
+}
+
+/**
+ * Add NVGRE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_nvgre(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_nvgre *nvgre_m = item->mask;
+ const struct rte_flow_item_nvgre *nvgre_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ const char *tni_flow_id_m;
+ const char *tni_flow_id_v;
+ char *gre_key_m;
+ char *gre_key_v;
+ int size;
+ int i;
+
+ /* For NVGRE, GRE header fields must be set with defined values. */
+ const struct rte_flow_item_gre gre_spec = {
+ .c_rsvd0_ver = RTE_BE16(0x2000),
+ .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
+ };
+ const struct rte_flow_item_gre gre_mask = {
+ .c_rsvd0_ver = RTE_BE16(0xB000),
+ .protocol = RTE_BE16(UINT16_MAX),
+ };
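+	/*
+	 * spec 0x2000 sets only the K (key present) bit; mask 0xB000
+	 * covers the C, K and S bits, so C and S are required clear.
+	 */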
+ const struct rte_flow_item gre_item = {
+ .spec = &gre_spec,
+ .mask = &gre_mask,
+ .last = NULL,
+ };
+ flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
+ if (!nvgre_v)
+ return;
+ if (!nvgre_m)
+ nvgre_m = &rte_flow_item_nvgre_mask;
+ tni_flow_id_m = (const char *)nvgre_m->tni;
+ tni_flow_id_v = (const char *)nvgre_v->tni;
+ size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
+ gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
+ gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
+ memcpy(gre_key_m, tni_flow_id_m, size);
+ for (i = 0; i < size; ++i)
+ gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
+}
+
+/**
+ * Add VXLAN item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vxlan(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_vxlan *vxlan_m = item->mask;
+ const struct rte_flow_item_vxlan *vxlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ char *vni_m;
+ char *vni_v;
+ uint16_t dport;
+ int size;
+ int i;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
+ MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
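+	/*
+	 * Match the default tunnel UDP destination port only when no
+	 * preceding UDP item has set one explicitly.
+	 */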
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!vxlan_v)
+ return;
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_mask;
+ size = sizeof(vxlan_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
+ memcpy(vni_m, vxlan_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+}
+
+/**
+ * Add VXLAN-GPE item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vxlan_gpe(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_vxlan_gpe *vxlan_m = item->mask;
+ const struct rte_flow_item_vxlan_gpe *vxlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_3);
+ void *misc_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ char *vni_m;
+ char *vni_v;
+ uint16_t dport;
+ int size;
+ int i;
+ uint8_t flags_m = 0xff;
+ uint8_t flags_v = 0xc;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+	/* The item type here is always VXLAN_GPE. */
+	dport = MLX5_UDP_PORT_VXLAN_GPE;
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!vxlan_v)
+ return;
+ if (!vxlan_m)
+ vxlan_m = &rte_flow_item_vxlan_gpe_mask;
+ size = sizeof(vxlan_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc3, misc_m, outer_vxlan_gpe_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc3, misc_v, outer_vxlan_gpe_vni);
+ memcpy(vni_m, vxlan_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & vxlan_v->vni[i];
+ if (vxlan_m->flags) {
+ flags_m = vxlan_m->flags;
+ flags_v = vxlan_v->flags;
+ }
+ MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_flags, flags_m);
+ MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_flags, flags_v);
+ MLX5_SET(fte_match_set_misc3, misc_m, outer_vxlan_gpe_next_protocol,
+ vxlan_m->protocol);
+ MLX5_SET(fte_match_set_misc3, misc_v, outer_vxlan_gpe_next_protocol,
+ vxlan_v->protocol);
+}
+
+/**
+ * Add Geneve item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_geneve(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_geneve *geneve_m = item->mask;
+ const struct rte_flow_item_geneve *geneve_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ uint16_t dport;
+ uint16_t gbhdr_m;
+ uint16_t gbhdr_v;
+ char *vni_m;
+ char *vni_v;
+ size_t size, i;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ dport = MLX5_UDP_PORT_GENEVE;
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!geneve_v)
+ return;
+ if (!geneve_m)
+ geneve_m = &rte_flow_item_geneve_mask;
+ size = sizeof(geneve_m->vni);
+ vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, geneve_vni);
+ vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, geneve_vni);
+ memcpy(vni_m, geneve_m->vni, size);
+ for (i = 0; i < size; ++i)
+ vni_v[i] = vni_m[i] & geneve_v->vni[i];
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_protocol_type,
+ rte_be_to_cpu_16(geneve_m->protocol));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type,
+ rte_be_to_cpu_16(geneve_v->protocol & geneve_m->protocol));
+ gbhdr_m = rte_be_to_cpu_16(geneve_m->ver_opt_len_o_c_rsvd0);
+ gbhdr_v = rte_be_to_cpu_16(geneve_v->ver_opt_len_o_c_rsvd0);
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_oam,
+ MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_oam,
+ MLX5_GENEVE_OAMF_VAL(gbhdr_v) & MLX5_GENEVE_OAMF_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_m, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+ MLX5_SET(fte_match_set_misc, misc_v, geneve_opt_len,
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_v) &
+ MLX5_GENEVE_OPTLEN_VAL(gbhdr_m));
+}
+
+/**
+ * Add MPLS item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] prev_layer
+ *   The protocol layer indicated in the previous item.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_mpls(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ uint64_t prev_layer,
+ int inner)
+{
+ const uint32_t *in_mpls_m = item->mask;
+ const uint32_t *in_mpls_v = item->spec;
+	uint32_t *out_mpls_m = NULL;
+	uint32_t *out_mpls_v = NULL;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_2);
+ void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+ void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ MLX5_UDP_PORT_MPLS);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
+ MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
+ RTE_ETHER_TYPE_MPLS);
+ break;
+ default:
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ IPPROTO_MPLS);
+ break;
+ }
+ if (!in_mpls_v)
+ return;
+ if (!in_mpls_m)
+ in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
+ switch (prev_layer) {
+ case MLX5_FLOW_LAYER_OUTER_L4_UDP:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_udp);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_udp);
+ break;
+ case MLX5_FLOW_LAYER_GRE:
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
+ outer_first_mpls_over_gre);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
+ outer_first_mpls_over_gre);
+ break;
+ default:
+ /* Inner MPLS not over GRE is not supported. */
+ if (!inner) {
+ out_mpls_m =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_m,
+ outer_first_mpls);
+ out_mpls_v =
+ (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
+ misc2_v,
+ outer_first_mpls);
+ }
+ break;
+ }
+ if (out_mpls_m && out_mpls_v) {
+ *out_mpls_m = *in_mpls_m;
+ *out_mpls_v = *in_mpls_v & *in_mpls_m;
+ }
+}
+
+/**
+ * Add metadata register item to matcher
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] reg_type
+ *   Type of device metadata register.
+ * @param[in] data
+ *   Register value.
+ * @param[in] mask
+ *   Register mask.
+ */
+static void
+flow_dv_match_meta_reg(void *matcher, void *key,
+ enum modify_reg reg_type,
+ uint32_t data, uint32_t mask)
+{
+ void *misc2_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
+ void *misc2_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
+ uint32_t temp;
+
+ data &= mask;
+ switch (reg_type) {
+ case REG_A:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a, data);
+ break;
+ case REG_B:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_b, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_b, data);
+ break;
+ case REG_C_0:
+ /*
+ * The metadata register C0 field might be divided into
+ * source vport index and META item value, we should set
+ * this field according to specified mask, not as whole one.
+ */
+ temp = MLX5_GET(fte_match_set_misc2, misc2_m, metadata_reg_c_0);
+ temp |= mask;
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_0, temp);
+ temp = MLX5_GET(fte_match_set_misc2, misc2_v, metadata_reg_c_0);
+ temp &= ~mask;
+ temp |= data;
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_0, temp);
+ break;
+ case REG_C_1:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_1, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_1, data);
+ break;
+ case REG_C_2:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_2, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_2, data);
+ break;
+ case REG_C_3:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_3, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_3, data);
+ break;
+ case REG_C_4:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_4, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_4, data);
+ break;
+ case REG_C_5:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_5, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_5, data);
+ break;
+ case REG_C_6:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_6, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_6, data);
+ break;
+ case REG_C_7:
+ MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_c_7, mask);
+ MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_c_7, data);
+ break;
+ default:
+ MLX5_ASSERT(false);
+ break;
+ }
+}
+
+/**
+ * Add MARK item to matcher.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_mark(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_mark *mark;
+ uint32_t value;
+ uint32_t mask;
+
+ mark = item->mask ? (const void *)item->mask :
+ &rte_flow_item_mark_mask;
+ mask = mark->id & priv->sh->dv_mark_mask;
+ mark = (const void *)item->spec;
+ MLX5_ASSERT(mark);
+ value = mark->id & priv->sh->dv_mark_mask & mask;
+ if (mask) {
+ enum modify_reg reg;
+
+ /* Get the metadata register index for the mark. */
+ reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, NULL);
+ MLX5_ASSERT(reg > 0);
+ if (reg == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
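+ /*
+ * rte_bsf32() returns the index of the least significant set
+ * bit, i.e. the offset of the MARK sub-field within register C0.
+ */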
+ mask &= msk_c0;
+ mask <<= shl_c0;
+ value <<= shl_c0;
+ }
+ flow_dv_match_meta_reg(matcher, key, reg, value, mask);
+ }
+}
+
+/**
+ * Add META item to matcher.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] attr
+ * Attributes of flow that includes this item.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_meta(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_meta *meta_m;
+ const struct rte_flow_item_meta *meta_v;
+
+ meta_m = (const void *)item->mask;
+ if (!meta_m)
+ meta_m = &rte_flow_item_meta_mask;
+ meta_v = (const void *)item->spec;
+ if (meta_v) {
+ int reg;
+ uint32_t value = meta_v->data;
+ uint32_t mask = meta_m->data;
+
+ reg = flow_dv_get_metadata_reg(dev, attr, NULL);
+ if (reg < 0)
+ return;
+ /*
+ * In datapath code there are no endianness
+ * conversions for performance reasons; all
+ * pattern conversions are done in rte_flow.
+ */
+ value = rte_cpu_to_be_32(value);
+ mask = rte_cpu_to_be_32(mask);
+ if (reg == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
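+ /*
+ * On little-endian hosts shift the big-endian value right by
+ * the number of leading zero bits of the META mask so that the
+ * field starts at bit 0 before it is moved into the C0 sub-field.
+ */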
+#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
+ uint32_t shr_c0 = __builtin_clz(priv->sh->dv_meta_mask);
+
+ value >>= shr_c0;
+ mask >>= shr_c0;
+#endif
+ value <<= shl_c0;
+ mask <<= shl_c0;
+ MLX5_ASSERT(msk_c0);
+ MLX5_ASSERT(!(~msk_c0 & mask));
+ }
+ flow_dv_match_meta_reg(matcher, key, reg, value, mask);
+ }
+}
+
+/**
+ * Add vport metadata Reg C0 item to matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] value
+ * Register value to match.
+ * @param[in] mask
+ * Register mask.
+ */
+static void
+flow_dv_translate_item_meta_vport(void *matcher, void *key,
+ uint32_t value, uint32_t mask)
+{
+ flow_dv_match_meta_reg(matcher, key, REG_C_0, value, mask);
+}
+
+/**
+ * Add tag item to matcher.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_mlx5_item_tag(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct mlx5_rte_flow_item_tag *tag_v = item->spec;
+ const struct mlx5_rte_flow_item_tag *tag_m = item->mask;
+ uint32_t mask, value;
+
+ MLX5_ASSERT(tag_v);
+ value = tag_v->data;
+ mask = tag_m ? tag_m->data : UINT32_MAX;
+ if (tag_v->id == REG_C_0) {
+ struct mlx5_priv *priv = dev->data->dev_private;
+ uint32_t msk_c0 = priv->sh->dv_regc0_mask;
+ uint32_t shl_c0 = rte_bsf32(msk_c0);
+
+ mask &= msk_c0;
+ mask <<= shl_c0;
+ value <<= shl_c0;
+ }
+ flow_dv_match_meta_reg(matcher, key, tag_v->id, value, mask);
+}
+
+/**
+ * Add TAG item to matcher.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_tag(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_tag *tag_v = item->spec;
+ const struct rte_flow_item_tag *tag_m = item->mask;
+ enum modify_reg reg;
+
+ MLX5_ASSERT(tag_v);
+ tag_m = tag_m ? tag_m : &rte_flow_item_tag_mask;
+ /* Get the metadata register index for the tag. */
+ reg = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, tag_v->index, NULL);
+ MLX5_ASSERT(reg > 0);
+ flow_dv_match_meta_reg(matcher, key, reg, tag_v->data, tag_m->data);
+}
+
+/**
+ * Add source vport match to the specified matcher.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] port
+ * Source vport value to match.
+ * @param[in] mask
+ * Mask to apply on the port value.
+ */
+static void
+flow_dv_translate_item_source_vport(void *matcher, void *key,
+ int16_t port, uint16_t mask)
+{
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+
+ MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
+ MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
+}
+
+/**
+ * Translate port-id item to eswitch match on port-id.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise.
+ */
+static int
+flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item)
+{
+ const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
+ const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
+ struct mlx5_priv *priv;
+ uint16_t mask, id;
+
+ mask = pid_m ? pid_m->id : 0xffff;
+ id = pid_v ? pid_v->id : dev->data->port_id;
+ priv = mlx5_port_to_eswitch_info(id, item == NULL);
+ if (!priv)
+ return -rte_errno;
+ /* Translate to vport field or to metadata, depending on mode. */
+ if (priv->vport_meta_mask)
+ flow_dv_translate_item_meta_vport(matcher, key,
+ priv->vport_meta_tag,
+ priv->vport_meta_mask);
+ else
+ flow_dv_translate_item_source_vport(matcher, key,
+ priv->vport_id, mask);
+ return 0;
+}
+
+/**
+ * Add ICMP6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp6(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
+ const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
+ if (!icmp6_v)
+ return;
+ if (!icmp6_m)
+ icmp6_m = &rte_flow_item_icmp6_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv6 ICMPv6 packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
+ icmp6_v->type & icmp6_m->type);
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
+ icmp6_v->code & icmp6_m->code);
+}
+
+/**
+ * Add ICMP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_icmp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_icmp *icmp_m = item->mask;
+ const struct rte_flow_item_icmp *icmp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
+ if (!icmp_v)
+ return;
+ if (!icmp_m)
+ icmp_m = &rte_flow_item_icmp_mask;
+ /*
+ * Force the flow to match only non-fragmented IPv4 ICMP packets.
+ * If only the protocol is specified, no need to match the frag.
+ */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, frag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
+ icmp_m->hdr.icmp_type);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
+ icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
+ MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
+ icmp_m->hdr.icmp_code);
+ MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
+ icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
+}
+
+/**
+ * Add GTP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_gtp(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner)
+{
+ const struct rte_flow_item_gtp *gtp_m = item->mask;
+ const struct rte_flow_item_gtp *gtp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_3);
+ void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
+ uint16_t dport = RTE_GTPU_UDP_PORT;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
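+ /*
+ * Match the GTP-U UDP destination port only when the pattern has
+ * not constrained the port already.
+ */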
+ if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
+ }
+ if (!gtp_v)
+ return;
+ if (!gtp_m)
+ gtp_m = &rte_flow_item_gtp_mask;
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_flags,
+ gtp_m->v_pt_rsv_flags);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_flags,
+ gtp_v->v_pt_rsv_flags & gtp_m->v_pt_rsv_flags);
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_msg_type, gtp_m->msg_type);
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_msg_type,
+ gtp_v->msg_type & gtp_m->msg_type);
+ MLX5_SET(fte_match_set_misc3, misc3_m, gtpu_teid,
+ rte_be_to_cpu_32(gtp_m->teid));
+ MLX5_SET(fte_match_set_misc3, misc3_v, gtpu_teid,
+ rte_be_to_cpu_32(gtp_v->teid & gtp_m->teid));
+}
+
+/**
+ * Add eCPRI item to matcher and to the value.
+ *
+ * @param[in] dev
+ * The device to configure through.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_ecpri(struct rte_eth_dev *dev, void *matcher,
+ void *key, const struct rte_flow_item *item)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ const struct rte_flow_item_ecpri *ecpri_m = item->mask;
+ const struct rte_flow_item_ecpri *ecpri_v = item->spec;
+ void *misc4_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ misc_parameters_4);
+ void *misc4_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_4);
+ uint32_t *samples;
+ void *dw_m;
+ void *dw_v;
+
+ if (!ecpri_v)
+ return;
+ if (!ecpri_m)
+ ecpri_m = &rte_flow_item_ecpri_mask;
+ /*
+ * At most four DW samples are supported in a single matching now.
+ * Two are used for eCPRI matching:
+ * 1. Type: one byte, mask should be 0x00ff0000 in network order.
+ * 2. ID of a message: one or two bytes, mask 0xffff0000 or 0xff000000
+ * if any.
+ */
+ if (!ecpri_m->hdr.common.u32)
+ return;
+ samples = priv->sh->fp[MLX5_FLEX_PARSER_ECPRI_0].ids;
+ /* Need to take the whole DW as the mask to fill the entry. */
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_0);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_0);
+ /* Already big endian (network order) in the header. */
+ *(uint32_t *)dw_m = ecpri_m->hdr.common.u32;
+ *(uint32_t *)dw_v = ecpri_v->hdr.common.u32;
+ /* Sample#0, used for matching type, offset 0. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_0, samples[0]);
+ /* It makes no sense to set the sample ID in the mask field. */
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_0, samples[0]);
+ /*
+ * Checking if message body part needs to be matched.
+ * Some wildcard rules only matching type field should be supported.
+ */
+ if (ecpri_m->hdr.dummy[0]) {
+ switch (ecpri_v->hdr.common.type) {
+ case RTE_ECPRI_MSG_TYPE_IQ_DATA:
+ case RTE_ECPRI_MSG_TYPE_RTC_CTRL:
+ case RTE_ECPRI_MSG_TYPE_DLY_MSR:
+ dw_m = MLX5_ADDR_OF(fte_match_set_misc4, misc4_m,
+ prog_sample_field_value_1);
+ dw_v = MLX5_ADDR_OF(fte_match_set_misc4, misc4_v,
+ prog_sample_field_value_1);
+ *(uint32_t *)dw_m = ecpri_m->hdr.dummy[0];
+ *(uint32_t *)dw_v = ecpri_v->hdr.dummy[0];
+ /* Sample#1, to match message body, offset 4. */
+ MLX5_SET(fte_match_set_misc4, misc4_m,
+ prog_sample_field_id_1, samples[1]);
+ MLX5_SET(fte_match_set_misc4, misc4_v,
+ prog_sample_field_id_1, samples[1]);
+ break;
+ default:
+ /* Others, do not match any sample ID. */
+ break;
+ }
+ }
+}
+
+static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
+
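+/*
+ * Evaluates to true when the given sub-header of the match criteria
+ * contains no set bit, i.e. nothing is matched in that block.
+ */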
+#define HEADER_IS_ZERO(match_criteria, headers) \
+ !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
+ matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
+
+/**
+ * Calculate flow matcher enable bitmap.
+ *
+ * @param match_criteria
+ * Pointer to flow matcher criteria.
+ *
+ * @return
+ * Bitmap of enabled fields.
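+ * For example, criteria with only outer_headers and misc_parameters_2
+ * populated yield the OUTER and MISC2 enable bits.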
+ */
+static uint8_t
+flow_dv_matcher_enable(uint32_t *match_criteria)
+{
+ uint8_t match_criteria_enable;
+
+ match_criteria_enable =
+ (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_4)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC4_BIT;
+ return match_criteria_enable;
+}
+
+/**
+ * Get a flow table.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] table_id
+ * Table id to use.
+ * @param[in] egress
+ * Direction of the table.
+ * @param[in] transfer
+ * E-Switch or NIC flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * Returns the table resource based on the index, NULL in case of failure.
+ */
+static struct mlx5_flow_tbl_resource *
+flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
+ uint32_t table_id, uint8_t egress,
+ uint8_t transfer,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_resource *tbl;
+ union mlx5_flow_tbl_key table_key = {
+ {
+ .table_id = table_id,
+ .reserved = 0,
+ .domain = !!transfer,
+ .direction = !!egress,
+ }
+ };
+ struct mlx5_hlist_entry *pos = mlx5_hlist_lookup(sh->flow_tbls,
+ table_key.v64);
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ uint32_t idx = 0;
+ int ret;
+ void *domain;
+
+ if (pos) {
+ tbl_data = container_of(pos, struct mlx5_flow_tbl_data_entry,
+ entry);
+ tbl = &tbl_data->tbl;
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
+ }
+ tbl_data = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_JUMP], &idx);
+ if (!tbl_data) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "cannot allocate flow table data entry");
+ return NULL;
+ }
+ tbl_data->idx = idx;
+ tbl = &tbl_data->tbl;
+ pos = &tbl_data->entry;
+ if (transfer)
+ domain = sh->fdb_domain;
+ else if (egress)
+ domain = sh->tx_domain;
+ else
+ domain = sh->rx_domain;
+ ret = mlx5_flow_os_create_flow_tbl(domain, table_id, &tbl->obj);
+ if (ret) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create flow table object");
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ return NULL;
+ }
+ /*
+ * No multi-threads now, but still better to initialize the reference
+ * count before inserting it into the hash list.
+ */
+ rte_atomic32_init(&tbl->refcnt);
+ /* Jump action reference count is initialized here. */
+ rte_atomic32_init(&tbl_data->jump.refcnt);
+ pos->key = table_key.v64;
+ ret = mlx5_hlist_insert(sh->flow_tbls, pos);
+ if (ret < 0) {
+ rte_flow_error_set(error, -ret,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot insert flow table data entry");
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_JUMP], idx);
+ return NULL;
+ }
+ rte_atomic32_inc(&tbl->refcnt);
+ return tbl;
+}
+
+/**
+ * Release a flow table.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] tbl
+ * Table resource to be released.
+ *
+ * @return
+ * Returns 0 if the table was released, 1 otherwise.
+ */
+static int
+flow_dv_tbl_resource_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_tbl_resource *tbl)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_tbl_data_entry *tbl_data =
+ container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+
+ if (!tbl)
+ return 0;
+ if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ struct mlx5_hlist_entry *pos = &tbl_data->entry;
+
+ mlx5_flow_os_destroy_flow_tbl(tbl->obj);
+ tbl->obj = NULL;
+ /* Remove the entry from the hash list and free the memory. */
+ mlx5_hlist_remove(sh->flow_tbls, pos);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_JUMP],
+ tbl_data->idx);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Register the flow matcher.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] matcher
+ * Pointer to flow matcher.
+ * @param[in, out] key
+ * Pointer to flow table key.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_matcher_register(struct rte_eth_dev *dev,
+ struct mlx5_flow_dv_matcher *matcher,
+ union mlx5_flow_tbl_key *key,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_matcher *cache_matcher;
+ struct mlx5dv_flow_matcher_attr dv_attr = {
+ .type = IBV_FLOW_ATTR_NORMAL,
+ .match_mask = (void *)&matcher->mask,
+ };
+ struct mlx5_flow_tbl_resource *tbl;
+ struct mlx5_flow_tbl_data_entry *tbl_data;
+ int ret;
+
+ tbl = flow_dv_tbl_resource_get(dev, key->table_id, key->direction,
+ key->domain, error);
+ if (!tbl)
+ return -rte_errno; /* No need to refill the error info */
+ tbl_data = container_of(tbl, struct mlx5_flow_tbl_data_entry, tbl);
+ /* Lookup from cache. */
+ LIST_FOREACH(cache_matcher, &tbl_data->matchers, next) {
+ if (matcher->crc == cache_matcher->crc &&
+ matcher->priority == cache_matcher->priority &&
+ !memcmp((const void *)matcher->mask.buf,
+ (const void *)cache_matcher->mask.buf,
+ cache_matcher->mask.size)) {
+ DRV_LOG(DEBUG,
+ "%s group %u priority %hd use %s "
+ "matcher %p: refcnt %d++",
+ key->domain ? "FDB" : "NIC", key->table_id,
+ cache_matcher->priority,
+ key->direction ? "tx" : "rx",
+ (void *)cache_matcher,
+ rte_atomic32_read(&cache_matcher->refcnt));
+ rte_atomic32_inc(&cache_matcher->refcnt);
+ dev_flow->handle->dvh.matcher = cache_matcher;
+ /*
+ * A cached matcher must not increase the table refcnt;
+ * release the reference taken by the get above.
+ */
+ flow_dv_tbl_resource_release(dev, tbl);
+ return 0;
+ }
+ }
+ /* Register new matcher. */
+ cache_matcher = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*cache_matcher), 0,
+ SOCKET_ID_ANY);
+ if (!cache_matcher) {
+ flow_dv_tbl_resource_release(dev, tbl);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate matcher memory");
+ }
+ *cache_matcher = *matcher;
+ dv_attr.match_criteria_enable =
+ flow_dv_matcher_enable(cache_matcher->mask.buf);
+ dv_attr.priority = matcher->priority;
+ if (key->direction)
+ dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
+ ret = mlx5_flow_os_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj,
+ &cache_matcher->matcher_object);
+ if (ret) {
+ mlx5_free(cache_matcher);
+#ifdef HAVE_MLX5DV_DR
+ flow_dv_tbl_resource_release(dev, tbl);
+#endif
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create matcher");
+ }
+ /* Save the table information. */
+ cache_matcher->tbl = tbl;
+ rte_atomic32_init(&cache_matcher->refcnt);
+ /* Only matcher ref++; table ref++ was already done above in the get API. */
+ rte_atomic32_inc(&cache_matcher->refcnt);
+ LIST_INSERT_HEAD(&tbl_data->matchers, cache_matcher, next);
+ dev_flow->handle->dvh.matcher = cache_matcher;
+ DRV_LOG(DEBUG, "%s group %u priority %hd new %s matcher %p: refcnt %d",
+ key->domain ? "FDB" : "NIC", key->table_id,
+ cache_matcher->priority,
+ key->direction ? "tx" : "rx", (void *)cache_matcher,
+ rte_atomic32_read(&cache_matcher->refcnt));
+ return 0;
+}
+
+/**
+ * Find existing tag resource or create and register a new one.
+ *
+ * @param[in, out] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] tag_be24
+ * Tag value in big-endian, then right-shifted by 8 bits.
+ * @param[in, out] dev_flow
+ * Pointer to the dev_flow.
+ * @param[out] error
+ * pointer to error structure.
+ *
+ * @return
+ * 0 on success, otherwise -errno and errno is set.
+ */
+static int
+flow_dv_tag_resource_register
+ (struct rte_eth_dev *dev,
+ uint32_t tag_be24,
+ struct mlx5_flow *dev_flow,
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_tag_resource *cache_resource;
+ struct mlx5_hlist_entry *entry;
+ int ret;
+
+ /* Lookup a matching resource from cache. */
+ entry = mlx5_hlist_lookup(sh->tag_table, (uint64_t)tag_be24);
+ if (entry) {
+ cache_resource = container_of
+ (entry, struct mlx5_flow_dv_tag_resource, entry);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ dev_flow->handle->dvh.rix_tag = cache_resource->idx;
+ dev_flow->dv.tag_resource = cache_resource;
+ DRV_LOG(DEBUG, "cached tag resource %p: refcnt now %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+ }
+ /* Register new resource. */
+ cache_resource = mlx5_ipool_zmalloc(sh->ipool[MLX5_IPOOL_TAG],
+ &dev_flow->handle->dvh.rix_tag);
+ if (!cache_resource)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "cannot allocate resource memory");
+ cache_resource->entry.key = (uint64_t)tag_be24;
+ ret = mlx5_flow_os_create_flow_action_tag(tag_be24,
+ &cache_resource->action);
+ if (ret) {
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
+ dev_flow->handle->dvh.rix_tag);
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create action");
+ }
+ rte_atomic32_init(&cache_resource->refcnt);
+ rte_atomic32_inc(&cache_resource->refcnt);
+ if (mlx5_hlist_insert(sh->tag_table, &cache_resource->entry)) {
+ mlx5_flow_os_destroy_flow_action(cache_resource->action);
+ mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TAG],
+ dev_flow->handle->dvh.rix_tag);
+ return rte_flow_error_set(error, EEXIST,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot insert tag");
+ }
+ dev_flow->dv.tag_resource = cache_resource;
+ DRV_LOG(DEBUG, "new tag resource %p: refcnt now %d++",
+ (void *)cache_resource,
+ rte_atomic32_read(&cache_resource->refcnt));
+ return 0;
+}
+
+/**
+ * Release the tag.
+ *
+ * @param dev
+ * Pointer to Ethernet device.
+ * @param tag_idx
+ * Tag index.
+ *
+ * @return
+ * 1 while a reference on it exists, 0 when freed.
+ */
+static int
+flow_dv_tag_release(struct rte_eth_dev *dev,
+ uint32_t tag_idx)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_ctx_shared *sh = priv->sh;
+ struct mlx5_flow_dv_tag_resource *tag;
+
+ tag = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
+ if (!tag)
+ return 0;
+ DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
+ dev->data->port_id, (void *)tag,
+ rte_atomic32_read(&tag->refcnt));
+ if (rte_atomic32_dec_and_test(&tag->refcnt)) {
+ claim_zero(mlx5_flow_os_destroy_flow_action(tag->action));
+ mlx5_hlist_remove(sh->tag_table, &tag->entry);
+ DRV_LOG(DEBUG, "port %u tag %p: removed",
+ dev->data->port_id, (void *)tag);
+ mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TAG], tag_idx);
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * Translate port ID action to vport.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in] action
+ * Pointer to the port ID action.
+ * @param[out] dst_port_id
+ * The target port ID.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
+ const struct rte_flow_action *action,
+ uint32_t *dst_port_id,
+ struct rte_flow_error *error)
+{
+ uint32_t port;
+ struct mlx5_priv *priv;
+ const struct rte_flow_action_port_id *conf =
+ (const struct rte_flow_action_port_id *)action->conf;
+
+ port = conf->original ? dev->data->port_id : conf->id;
+ priv = mlx5_port_to_eswitch_info(port, false);
+ if (!priv)
+ return rte_flow_error_set(error, -rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "No eswitch info was found for port");
+#ifdef HAVE_MLX5DV_DR_DEVX_PORT
+ /*
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_ib_port().
+ */
+ *dst_port_id = priv->dev_port;
+#else
+ /*
+ * Legacy mode, no LAG configuration is supported.
+ * This parameter is transferred to
+ * mlx5dv_dr_action_create_dest_vport().
+ */
+ *dst_port_id = priv->vport_id;
+#endif
+ return 0;
+}
+
+/**
+ * Create a counter with aging configuration.
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] count
+ * Pointer to the counter action configuration.
+ * @param[in] age
+ * Pointer to the aging action configuration.
+ *
+ * @return
+ * Index to flow counter on success, 0 otherwise.
+ */
+static uint32_t
+flow_dv_translate_create_counter(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_action_count *count,
+ const struct rte_flow_action_age *age)
+{
+ uint32_t counter;
+ struct mlx5_age_param *age_param;
+
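+ /*
+ * Without a COUNT action (age-only flow) a non-shared counter
+ * with id 0 is allocated.
+ */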
+ counter = flow_dv_counter_alloc(dev,
+ count ? count->shared : 0,
+ count ? count->id : 0,
+ dev_flow->dv.group, !!age);
+ if (!counter || age == NULL)
+ return counter;
+ age_param = flow_dv_counter_idx_get_age(dev, counter);
+ age_param->context = age->context ? age->context :
+ (void *)(uintptr_t)(dev_flow->flow_idx);
+ /*
+ * The counter age accuracy may have a bit of delay. Have a 3/4
+ * second bias on the timeout in order to let it age in time.
+ */
+ age_param->timeout = age->timeout * 10 - MLX5_AGING_TIME_DELAY;
+ age_param->port_id = dev->data->port_id;
+ /* Set expire time in units of 0.1 sec. */
+ age_param->expire = age_param->timeout +
+ rte_rdtsc() / (rte_get_tsc_hz() / 10);
+ rte_atomic16_set(&age_param->state, AGE_CANDIDATE);
+ return counter;
+}
+
+/**
+ * Add Tx queue matcher.
+ *
+ * @param[in] dev
+ * Pointer to the dev struct.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_tx_queue(struct rte_eth_dev *dev,
+ void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const struct mlx5_rte_flow_item_tx_queue *queue_m;
+ const struct mlx5_rte_flow_item_tx_queue *queue_v;
+ void *misc_m =
+ MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v =
+ MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ struct mlx5_txq_ctrl *txq;
+ uint32_t queue;
+
+ queue_m = (const void *)item->mask;
+ if (!queue_m)
+ return;
+ queue_v = (const void *)item->spec;
+ if (!queue_v)
+ return;
+ txq = mlx5_txq_get(dev, queue_v->queue);
+ if (!txq)
+ return;
+ queue = txq->obj->sq->id;
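+ /* Match the flow on the SQ number backing the Tx queue. */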
+ MLX5_SET(fte_match_set_misc, misc_m, source_sqn, queue_m->queue);
+ MLX5_SET(fte_match_set_misc, misc_v, source_sqn,
+ queue & queue_m->queue);
+ mlx5_txq_release(dev, queue_v->queue);
+}
+
+/**
+ * Set the hash fields according to the @p flow information.
+ *
+ * @param[in] dev_flow
+ * Pointer to the mlx5_flow.
+ * @param[in] rss_desc
+ * Pointer to the mlx5_flow_rss_desc.
+ */
+static void
+flow_dv_hashfields_set(struct mlx5_flow *dev_flow,
+ struct mlx5_flow_rss_desc *rss_desc)
+{
+ uint64_t items = dev_flow->handle->layers;
+ int rss_inner = 0;
+ uint64_t rss_types = rte_eth_rss_hf_refine(rss_desc->types);
+
+ dev_flow->hash_fields = 0;
+#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
+ if (rss_desc->level >= 2) {
+ dev_flow->hash_fields |= IBV_RX_HASH_INNER;
+ rss_inner = 1;
+ }
+#endif
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV4)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV4))) {
+ if (rss_types & MLX5_IPV4_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV4;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV4;
+ else
+ dev_flow->hash_fields |= MLX5_IPV4_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L3_IPV6)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L3_IPV6))) {
+ if (rss_types & MLX5_IPV6_LAYER_TYPES) {
+ if (rss_types & ETH_RSS_L3_SRC_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_SRC_IPV6;
+ else if (rss_types & ETH_RSS_L3_DST_ONLY)
+ dev_flow->hash_fields |= IBV_RX_HASH_DST_IPV6;
+ else
+ dev_flow->hash_fields |= MLX5_IPV6_IBV_RX_HASH;
+ }
+ }
+ if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_UDP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_UDP))) {
+ if (rss_types & ETH_RSS_UDP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_UDP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_UDP;
+ else
+ dev_flow->hash_fields |= MLX5_UDP_IBV_RX_HASH;
+ }
+ } else if ((rss_inner && (items & MLX5_FLOW_LAYER_INNER_L4_TCP)) ||
+ (!rss_inner && (items & MLX5_FLOW_LAYER_OUTER_L4_TCP))) {
+ if (rss_types & ETH_RSS_TCP) {
+ if (rss_types & ETH_RSS_L4_SRC_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_SRC_PORT_TCP;
+ else if (rss_types & ETH_RSS_L4_DST_ONLY)
+ dev_flow->hash_fields |=
+ IBV_RX_HASH_DST_PORT_TCP;
+ else
+ dev_flow->hash_fields |= MLX5_TCP_IBV_RX_HASH;
+ }
+ }
+}
+
+/**
+ * Fill the flow with DV spec, lock free
+ * (the mutex should be acquired by the caller).
+ *
+ * @param[in] dev
+ * Pointer to rte_eth_dev structure.
+ * @param[in, out] dev_flow
+ * Pointer to the sub flow.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
+ */
+static int
+__flow_dv_translate(struct rte_eth_dev *dev,
+ struct mlx5_flow *dev_flow,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[],
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error)
+{
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_dev_config *dev_conf = &priv->config;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5_flow_handle *handle = dev_flow->handle;
+ struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
+ priv->rss_desc)
+ [!!priv->flow_nested_idx];
+ uint64_t item_flags = 0;
+ uint64_t last_item = 0;
+ uint64_t action_flags = 0;
+ uint64_t priority = attr->priority;
+ struct mlx5_flow_dv_matcher matcher = {
+ .mask = {
+ .size = sizeof(matcher.mask.buf) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4),
+ },
+ };
+ int actions_n = 0;
+ bool actions_end = false;
+ union {
+ struct mlx5_flow_dv_modify_hdr_resource res;
+ uint8_t len[sizeof(struct mlx5_flow_dv_modify_hdr_resource) +
+ sizeof(struct mlx5_modification_cmd) *
+ (MLX5_MAX_MODIFY_NUM + 1)];
+ } mhdr_dummy;
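+ /*
+ * mhdr_dummy reserves stack space for the modify header resource
+ * plus up to MLX5_MAX_MODIFY_NUM + 1 modification commands.
+ */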
+ struct mlx5_flow_dv_modify_hdr_resource *mhdr_res = &mhdr_dummy.res;
+ const struct rte_flow_action_count *count = NULL;
+ const struct rte_flow_action_age *age = NULL;
+ union flow_dv_attr flow_attr = { .attr = 0 };
+ uint32_t tag_be;
+ union mlx5_flow_tbl_key tbl_key;
+ uint32_t modify_action_position = UINT32_MAX;
+ void *match_mask = matcher.mask.buf;
+ void *match_value = dev_flow->dv.value.buf;
+ uint8_t next_protocol = 0xff;
+ struct rte_vlan_hdr vlan = { 0 };
+ uint32_t table;
+ int ret = 0;
+
+ mhdr_res->ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
+ MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
+ ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
+ !!priv->fdb_def_rule, &table, error);
+ if (ret)
+ return ret;
+ dev_flow->dv.group = table;
+ if (attr->transfer)
+ mhdr_res->ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
+ if (priority == MLX5_FLOW_PRIO_RSVD)
+ priority = dev_conf->flow_prio - 1;
+ /* The number of actions must be set to 0 in case of a dirty stack. */
+ mhdr_res->actions_num = 0;
+ for (; !actions_end ; actions++) {
+ const struct rte_flow_action_queue *queue;
+ const struct rte_flow_action_rss *rss;
+ const struct rte_flow_action *action = actions;
+ const uint8_t *rss_key;
+ const struct rte_flow_action_jump *jump_data;
+ const struct rte_flow_action_meter *mtr;
+ struct mlx5_flow_tbl_resource *tbl;
+ uint32_t port_id = 0;
+ struct mlx5_flow_dv_port_id_action_resource port_id_resource;
+ int action_type = actions->type;
+ const struct rte_flow_action *found_action = NULL;
+ struct mlx5_flow_meter *fm = NULL;
+
+ if (!mlx5_flow_os_action_supported(action_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ switch (action_type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ if (flow_dv_translate_action_port_id(dev, action,
+ &port_id, error))
+ return -rte_errno;
+ port_id_resource.port_id = port_id;
+ MLX5_ASSERT(!handle->rix_port_id_action);
+ if (flow_dv_port_id_action_resource_register
+ (dev, &port_id_resource, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.port_id_action->action;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_PORT_ID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ struct rte_flow_action_mark mark = {
+ .id = MLX5_FLOW_MARK_DEFAULT,
+ };
+
+ if (flow_dv_convert_action_mark(dev, &mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ tag_be = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
+ /*
+ * Only one FLAG or MARK is supported per device flow
+ * right now. So the pointer to the tag resource must be
+ * zero before the register process.
+ */
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ dev_flow->handle->mark = 1;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ const struct rte_flow_action_mark *mark =
+ (const struct rte_flow_action_mark *)
+ actions->conf;
+
+ if (flow_dv_convert_action_mark(dev, mark,
+ mhdr_res,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_MARK_EXT;
+ break;
+ }
+ /* Fall-through */
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ /* Legacy (non-extensive) MARK action. */
+ tag_be = mlx5_flow_mark_set
+ (((const struct rte_flow_action_mark *)
+ (actions->conf))->id);
+ MLX5_ASSERT(!handle->dvh.rix_tag);
+ if (flow_dv_tag_resource_register(dev, tag_be,
+ dev_flow, error))
+ return -rte_errno;
+ MLX5_ASSERT(dev_flow->dv.tag_resource);
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.tag_resource->action;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ if (flow_dv_convert_action_set_meta
+ (dev, mhdr_res, attr,
+ (const struct rte_flow_action_set_meta *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ if (flow_dv_convert_action_set_tag
+ (dev, mhdr_res,
+ (const struct rte_flow_action_set_tag *)
+ actions->conf, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_DROP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ queue = actions->conf;
+ rss_desc->queue_num = 1;
+ rss_desc->queue[0] = queue->index;
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ memcpy(rss_desc->queue, rss->queue,
+ rss->queue_num * sizeof(uint16_t));
+ rss_desc->queue_num = rss->queue_num;
+ /* NULL RSS key indicates default RSS key. */
+ rss_key = !rss->key ? rss_hash_default_key : rss->key;
+ memcpy(rss_desc->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss->types should be set in advance
+ * when expanding items for RSS.
+ */
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_QUEUE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ if (!dev_conf->devx) {
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "count action not supported");
+ }
+ /* Save information first, will apply later. */
+ if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT)
+ count = action->conf;
+ else
+ age = action->conf;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ dev_flow->dv.actions[actions_n++] =
+ priv->sh->pop_vlan_action;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ if (!(action_flags &
+ MLX5_FLOW_ACTION_OF_SET_VLAN_VID))
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ vlan.eth_proto = rte_be_to_cpu_16
+ ((((const struct rte_flow_action_of_push_vlan *)
+ actions->conf)->ethertype));
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ found_action = mlx5_flow_find_action
+ (actions + 1,
+ RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP);
+ if (found_action)
+ mlx5_update_vlan_vid_pcp(found_action, &vlan);
+ if (flow_dv_create_action_push_vlan
+ (dev, attr, &vlan, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.push_vlan_res->action;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ /* The of_vlan_push action handled this action. */
+ MLX5_ASSERT(action_flags &
+ MLX5_FLOW_ACTION_OF_PUSH_VLAN);
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ break;
+ flow_dev_get_vlan_info_from_items(items, &vlan);
+ mlx5_update_vlan_vid_pcp(actions, &vlan);
+ /* If no VLAN push - this is a modify header action */
+ if (flow_dv_convert_action_modify_vlan_vid
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ if (flow_dv_create_action_l2_encap(dev, actions,
+ dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ if (flow_dv_create_action_l2_decap(dev, dev_flow,
+ attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ /* Handle encap with preceding decap. */
+ if (action_flags & MLX5_FLOW_ACTION_DECAP) {
+ if (flow_dv_create_action_raw_encap
+ (dev, actions, dev_flow, attr, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ } else {
+ /* Handle encap without preceding decap. */
+ if (flow_dv_create_action_l2_encap
+ (dev, actions, dev_flow, attr->transfer,
+ error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ }
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
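+ /*
+ * Peek past VOID actions to check whether the decap is
+ * immediately followed by a raw encap.
+ */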
+ while ((++action)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ if (flow_dv_create_action_l2_decap
+ (dev, dev_flow, attr->transfer, error))
+ return -rte_errno;
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.encap_decap->action;
+ }
+ /* If decap is followed by encap, handle it at encap. */
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ jump_data = action->conf;
+ ret = mlx5_flow_group_to_table(attr, dev_flow->external,
+ jump_data->group,
+ !!priv->fdb_def_rule,
+ &table, error);
+ if (ret)
+ return ret;
+ tbl = flow_dv_tbl_resource_get(dev, table,
+ attr->egress,
+ attr->transfer, error);
+ if (!tbl)
+ return rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create jump action.");
+ if (flow_dv_jump_tbl_resource_register
+ (dev, tbl, dev_flow, error)) {
+ flow_dv_tbl_resource_release(dev, tbl);
+ return rte_flow_error_set
+ (error, errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create jump action.");
+ }
+ dev_flow->dv.actions[actions_n++] =
+ dev_flow->dv.jump->action;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ dev_flow->handle->fate_action = MLX5_FLOW_FATE_JUMP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ if (flow_dv_convert_action_modify_mac
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ if (flow_dv_convert_action_modify_ipv4
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ if (flow_dv_convert_action_modify_ipv6
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ if (flow_dv_convert_action_modify_tp
+ (mhdr_res, actions, items,
+ &flow_attr, dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ if (flow_dv_convert_action_modify_dec_ttl
+ (mhdr_res, items, &flow_attr, dev_flow,
+ !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ if (flow_dv_convert_action_modify_ttl
+ (mhdr_res, actions, items, &flow_attr,
+ dev_flow, !!(action_flags &
+ MLX5_FLOW_ACTION_DECAP), error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ if (flow_dv_convert_action_modify_tcp_seq
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+ MLX5_FLOW_ACTION_INC_TCP_SEQ :
+ MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ if (flow_dv_convert_action_modify_tcp_ack
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+ MLX5_FLOW_ACTION_INC_TCP_ACK :
+ MLX5_FLOW_ACTION_DEC_TCP_ACK;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ if (flow_dv_convert_action_set_reg
+ (mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
+ if (flow_dv_convert_action_copy_mreg
+ (dev, mhdr_res, actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ dev_flow->handle->fate_action =
+ MLX5_FLOW_FATE_DEFAULT_MISS;
+ break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ mtr = actions->conf;
+ if (!flow->meter) {
+ fm = mlx5_flow_meter_attach(priv, mtr->mtr_id,
+ attr, error);
+ if (!fm)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "meter not found "
+ "or invalid parameters");
+ flow->meter = fm->idx;
+ }
+ /* Set the meter action. */
+ if (!fm) {
+ fm = mlx5_ipool_get(priv->sh->ipool
+ [MLX5_IPOOL_MTR], flow->meter);
+ if (!fm)
+ return rte_flow_error_set(error,
+ rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "meter not found "
+ "or invalid parameters");
+ }
+ dev_flow->dv.actions[actions_n++] =
+ fm->mfts->meter_action;
+ action_flags |= MLX5_FLOW_ACTION_METER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ if (flow_dv_convert_action_modify_ipv4_dscp(mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ if (flow_dv_convert_action_modify_ipv6_dscp(mhdr_res,
+ actions, error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_END:
+ actions_end = true;
+ if (mhdr_res->actions_num) {
+ /* create modify action if needed. */
+ if (flow_dv_modify_hdr_resource_register
+ (dev, mhdr_res, dev_flow, error))
+ return -rte_errno;
+ dev_flow->dv.actions[modify_action_position] =
+ handle->dvh.modify_hdr->action;
+ }
+ if (action_flags & MLX5_FLOW_ACTION_COUNT) {
+ flow->counter =
+ flow_dv_translate_create_counter(dev,
+ dev_flow, count, age);
+
+ if (!flow->counter)
+ return rte_flow_error_set
+ (error, rte_errno,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "cannot create counter"
+ " object.");
+ dev_flow->dv.actions[actions_n++] =
+ (flow_dv_counter_get_by_idx(dev,
+ flow->counter, NULL))->action;
+ }
+ break;
+ default:
+ break;
+ }
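+ /*
+ * Reserve one slot in the action array for the modify header
+ * action at the position of the first converted action; the
+ * handle itself is filled in when END is reached.
+ */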
+ if (mhdr_res->actions_num &&
+ modify_action_position == UINT32_MAX)
+ modify_action_position = actions_n++;
+ }
+ dev_flow->dv.actions_n = actions_n;
+ dev_flow->act_flags = action_flags;
+ for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
+ int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
+ int item_type = items->type;
+
+ if (!mlx5_flow_os_item_supported(item_type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_PORT_ID:
+ flow_dv_translate_item_port_id(dev, match_mask,
+ match_value, items);
+ last_item = MLX5_FLOW_ITEM_PORT_ID;
+ break;