+ case RTE_FLOW_ITEM_TYPE_VLAN:
+ ret = flow_dv_validate_item_vlan(items, item_flags,
+ dev, error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
+ MLX5_FLOW_LAYER_OUTER_VLAN;
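+			/* Derive the masked inner ethertype for the following L3 item check. */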
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
+ /* Store outer VLAN mask for of_push_vlan action. */
+ if (!tunnel)
+ vlan_m = items->mask;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ ret = mlx5_flow_validate_item_ipv4(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ipv4_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV4;
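+			/* Capture the masked next protocol to validate the follow-up item. */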
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv4 *)
+ items->mask)->hdr.next_proto_id) {
+ next_protocol =
+ ((const struct rte_flow_item_ipv4 *)
+ (items->spec))->hdr.next_proto_id;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv4 *)
+ (items->mask))->hdr.next_proto_id;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ mlx5_flow_tunnel_ip_check(items, next_protocol,
+ &item_flags, &tunnel);
+ ret = mlx5_flow_validate_item_ipv6(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ipv6_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
+ MLX5_FLOW_LAYER_OUTER_L3_IPV6;
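+			/* Keep the IPv6 proto; header rewrite on ICMPv6 is rejected later. */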
+ if (items->mask != NULL &&
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto) {
+ item_ipv6_proto =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol =
+ ((const struct rte_flow_item_ipv6 *)
+ items->spec)->hdr.proto;
+ next_protocol &=
+ ((const struct rte_flow_item_ipv6 *)
+ items->mask)->hdr.proto;
+ } else {
+ /* Reset for inner layer. */
+ next_protocol = 0xff;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ ret = mlx5_flow_validate_item_tcp
+ (items, item_flags,
+ next_protocol,
+ &nic_tcp_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
+ MLX5_FLOW_LAYER_OUTER_L4_TCP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ ret = mlx5_flow_validate_item_udp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
+ MLX5_FLOW_LAYER_OUTER_L4_UDP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE:
+ ret = mlx5_flow_validate_item_gre(items, item_flags,
+ next_protocol, error);
+ if (ret < 0)
+ return ret;
+ gre_item = items;
+ last_item = MLX5_FLOW_LAYER_GRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ ret = mlx5_flow_validate_item_nvgre(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_NVGRE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GRE_KEY:
+ ret = mlx5_flow_validate_item_gre_key
+ (items, item_flags, gre_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GRE_KEY;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ ret = mlx5_flow_validate_item_vxlan(items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_VXLAN;
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
+ ret = mlx5_flow_validate_item_vxlan_gpe(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ ret = mlx5_flow_validate_item_geneve(items,
+ item_flags, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GENEVE;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MPLS:
+ ret = mlx5_flow_validate_item_mpls(dev, items,
+ item_flags,
+ last_item, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_MPLS;
+ break;
+ case RTE_FLOW_ITEM_TYPE_MARK:
+ ret = flow_dv_validate_item_mark(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_MARK;
+ break;
+ case RTE_FLOW_ITEM_TYPE_META:
+ ret = flow_dv_validate_item_meta(dev, items, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_METADATA;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP:
+ ret = mlx5_flow_validate_item_icmp(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ICMP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ICMP6:
+ ret = mlx5_flow_validate_item_icmp6(items, item_flags,
+ next_protocol,
+ error);
+ if (ret < 0)
+ return ret;
+ item_ipv6_proto = IPPROTO_ICMPV6;
+ last_item = MLX5_FLOW_LAYER_ICMP6;
+ break;
+ case RTE_FLOW_ITEM_TYPE_TAG:
+ ret = flow_dv_validate_item_tag(dev, items,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_ITEM_TAG;
+ break;
+ case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
+ case MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE:
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP:
+ ret = flow_dv_validate_item_gtp(dev, items, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_GTP;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ECPRI:
+ /* Capacity will be checked in the translate stage. */
+ ret = mlx5_flow_validate_item_ecpri(items, item_flags,
+ last_item,
+ ether_type,
+ &nic_ecpri_mask,
+ error);
+ if (ret < 0)
+ return ret;
+ last_item = MLX5_FLOW_LAYER_ECPRI;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ NULL, "item not supported");
+ }
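+		/* Accumulate the layer just validated into the item flags. */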
+ item_flags |= last_item;
+ }
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ int type = actions->type;
+
+ if (!mlx5_flow_os_action_supported(type))
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "too many actions");
+ switch (type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_PORT_ID:
+ ret = flow_dv_validate_action_port_id(dev,
+ action_flags,
+ actions,
+ attr,
+ error);
+ if (ret)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_PORT_ID;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_FLAG:
+ ret = flow_dv_validate_action_flag(dev, action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_FLAG |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_FLAG;
+ ++actions_n;
+ }
+ rw_act_num += MLX5_ACT_NUM_SET_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ ret = flow_dv_validate_action_mark(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ if (dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY) {
+ /* Count all modify-header actions as one. */
+ if (!(action_flags &
+ MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_MARK |
+ MLX5_FLOW_ACTION_MARK_EXT;
+ } else {
+ action_flags |= MLX5_FLOW_ACTION_MARK;
+ ++actions_n;
+ }
+ rw_act_num += MLX5_ACT_NUM_SET_MARK;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_META:
+ ret = flow_dv_validate_action_set_meta(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_META;
+ rw_act_num += MLX5_ACT_NUM_SET_META;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TAG:
+ ret = flow_dv_validate_action_set_tag(dev, actions,
+ action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_TAG;
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ ret = mlx5_flow_validate_action_drop(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DROP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ ret = mlx5_flow_validate_action_queue(actions,
+ action_flags, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ queue_index = ((const struct rte_flow_action_queue *)
+ (actions->conf))->index;
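+			/* Save the queue index for the hairpin check after the loop. */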
+ action_flags |= MLX5_FLOW_ACTION_QUEUE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rss = actions->conf;
+ ret = mlx5_flow_validate_action_rss(actions,
+ action_flags, dev,
+ attr, item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (rss != NULL && rss->queue_num)
+ queue_index = rss->queue[0];
+ action_flags |= MLX5_FLOW_ACTION_RSS;
+ ++actions_n;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS:
+ ret =
+ mlx5_flow_validate_action_default_miss(action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DEFAULT_MISS;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ ret = flow_dv_validate_action_count(dev, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_COUNT;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
+ if (flow_dv_validate_action_pop_vlan(dev,
+ action_flags,
+ actions,
+ item_flags, attr,
+ error))
+ return -rte_errno;
+ action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
+ ret = flow_dv_validate_action_push_vlan(dev,
+ action_flags,
+ vlan_m,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
+ ret = flow_dv_validate_action_set_vlan_pcp
+ (action_flags, actions, error);
+ if (ret < 0)
+ return ret;
+			/* PCP is counted together with the push_vlan action. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_PCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
+ ret = flow_dv_validate_action_set_vlan_vid
+ (item_flags, action_flags,
+ actions, error);
+ if (ret < 0)
+ return ret;
+			/* VID is counted together with the push_vlan action. */
+ action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
+ rw_act_num += MLX5_ACT_NUM_MDF_VID;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
+ ret = flow_dv_validate_action_l2_encap(dev,
+ action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_ENCAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
+ ret = flow_dv_validate_action_decap(dev, action_flags,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_DECAP;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev, NULL, actions->conf, attr, &action_flags,
+ &actions_n, error);
+ if (ret < 0)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
+ decap = actions->conf;
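+			/* Skip VOID actions to see whether a RAW_ENCAP follows directly. */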
+ while ((++actions)->type == RTE_FLOW_ACTION_TYPE_VOID)
+ ;
+ if (actions->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
+ encap = NULL;
+ actions--;
+ } else {
+ encap = actions->conf;
+ }
+ ret = flow_dv_validate_action_raw_encap_decap
+ (dev,
+ decap ? decap : &empty_decap, encap,
+ attr, &action_flags, &actions_n,
+ error);
+ if (ret < 0)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
+ ret = flow_dv_validate_action_modify_mac(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
+ MLX5_FLOW_ACTION_SET_MAC_SRC :
+ MLX5_FLOW_ACTION_SET_MAC_DST;
+			/*
+			 * Although the source and destination MAC addresses
+			 * overlap in the 4B-aligned header, the convert
+			 * function handles them separately, so 4 SW actions
+			 * are created in total. Each set_mac adds 2 actions,
+			 * no matter how many bytes of the address are set.
+			 */
+ rw_act_num += MLX5_ACT_NUM_MDF_MAC;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
+ ret = flow_dv_validate_action_modify_ipv4(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV4_SRC :
+ MLX5_FLOW_ACTION_SET_IPV4_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_IPV4;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
+ ret = flow_dv_validate_action_modify_ipv6(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ if (item_ipv6_proto == IPPROTO_ICMPV6)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Can't change header "
+ "with ICMPv6 proto");
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
+ MLX5_FLOW_ACTION_SET_IPV6_SRC :
+ MLX5_FLOW_ACTION_SET_IPV6_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_IPV6;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
+ case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
+ ret = flow_dv_validate_action_modify_tp(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
+ MLX5_FLOW_ACTION_SET_TP_SRC :
+ MLX5_FLOW_ACTION_SET_TP_DST;
+ rw_act_num += MLX5_ACT_NUM_MDF_PORT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DEC_TTL:
+ case RTE_FLOW_ACTION_TYPE_SET_TTL:
+ ret = flow_dv_validate_action_modify_ttl(action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_SET_TTL ?
+ MLX5_FLOW_ACTION_SET_TTL :
+ MLX5_FLOW_ACTION_DEC_TTL;
+ rw_act_num += MLX5_ACT_NUM_MDF_TTL;
+ break;
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ ret = flow_dv_validate_action_jump(actions,
+ action_flags,
+ attr, external,
+ error);
+ if (ret)
+ return ret;
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_JUMP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
+ ret = flow_dv_validate_action_modify_tcp_seq
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
+ MLX5_FLOW_ACTION_INC_TCP_SEQ :
+ MLX5_FLOW_ACTION_DEC_TCP_SEQ;
+ rw_act_num += MLX5_ACT_NUM_MDF_TCPSEQ;
+ break;
+ case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
+ case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
+ ret = flow_dv_validate_action_modify_tcp_ack
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= actions->type ==
+ RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
+ MLX5_FLOW_ACTION_INC_TCP_ACK :
+ MLX5_FLOW_ACTION_DEC_TCP_ACK;
+ rw_act_num += MLX5_ACT_NUM_MDF_TCPACK;
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_MARK:
+ break;
+ case MLX5_RTE_FLOW_ACTION_TYPE_TAG:
+ case MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG:
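+			/* Internal actions: only the modify-header budget is consumed. */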
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_METER:
+ ret = mlx5_flow_validate_action_meter(dev,
+ action_flags,
+ actions, attr,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_METER;
+ ++actions_n;
+ /* Meter action will add one more TAG action. */
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ break;
+ case RTE_FLOW_ACTION_TYPE_AGE:
+ ret = flow_dv_validate_action_age(action_flags,
+ actions, dev,
+ error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_AGE;
+ ++actions_n;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
+ ret = flow_dv_validate_action_modify_ipv4_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV4_DSCP;
+ rw_act_num += MLX5_ACT_NUM_SET_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
+ ret = flow_dv_validate_action_modify_ipv6_dscp
+ (action_flags,
+ actions,
+ item_flags,
+ error);
+ if (ret < 0)
+ return ret;
+ /* Count all modify-header actions as one action. */
+ if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
+ ++actions_n;
+ action_flags |= MLX5_FLOW_ACTION_SET_IPV6_DSCP;
+ rw_act_num += MLX5_ACT_NUM_SET_DSCP;
+ break;
+ case RTE_FLOW_ACTION_TYPE_SAMPLE:
+ ret = flow_dv_validate_action_sample(action_flags,
+ actions, dev,
+ attr, error);
+ if (ret < 0)
+ return ret;
+ action_flags |= MLX5_FLOW_ACTION_SAMPLE;
+ ++actions_n;
+ break;
+ default:
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "action not supported");
+ }
+ }
+	/*
+	 * Validate the drop action: it is mutually exclusive with any
+	 * other action except for the count action.
+	 */
+ if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Drop action is mutually-exclusive "
+ "with any other action, except for "
+ "Count action");
+	/* E-Switch has a few restrictions on items and actions. */
+ if (attr->transfer) {
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_FLAG)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action FLAG");
+ if (!mlx5_flow_ext_mreg_supported(dev) &&
+ action_flags & MLX5_FLOW_ACTION_MARK)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action MARK");
+ if (action_flags & MLX5_FLOW_ACTION_QUEUE)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action QUEUE");
+ if (action_flags & MLX5_FLOW_ACTION_RSS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL,
+ "unsupported action RSS");
+ if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no fate action is found");
+ } else {
+ if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "no fate action is found");
+ }
+	/*
+	 * Continue validating Xcap (encap/decap) and VLAN actions;
+	 * they are restricted unless the flow targets a hairpin queue.
+	 */
+ if ((action_flags & (MLX5_FLOW_XCAP_ACTIONS |
+ MLX5_FLOW_VLAN_ACTIONS)) &&
+ (queue_index == 0xFFFF ||
+ mlx5_rxq_get_type(dev, queue_index) != MLX5_RXQ_TYPE_HAIRPIN)) {
+ if ((action_flags & MLX5_FLOW_XCAP_ACTIONS) ==
+ MLX5_FLOW_XCAP_ACTIONS)
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap and decap "
+ "combination aren't supported");
+ if (!attr->transfer && attr->ingress) {
+ if (action_flags & MLX5_FLOW_ACTION_ENCAP)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "encap is not supported"
+ " for ingress traffic");
+ else if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "push VLAN action not "
+ "supported for ingress");
+ else if ((action_flags & MLX5_FLOW_VLAN_ACTIONS) ==
+ MLX5_FLOW_VLAN_ACTIONS)
+ return rte_flow_error_set
+ (error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "no support for "
+ "multiple VLAN actions");
+ }
+ }
+ /* Hairpin flow will add one more TAG action. */
+ if (hairpin > 0)
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+	/* Extra metadata enabled: one more TAG action will be added. */
+ if (dev_conf->dv_flow_en &&
+ dev_conf->dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
+ mlx5_flow_ext_mreg_supported(dev))
+ rw_act_num += MLX5_ACT_NUM_SET_TAG;
+ if ((uint32_t)rw_act_num >
+ flow_dv_modify_hdr_action_max(dev, is_root)) {
+ return rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ NULL, "too many header modify"
+ " actions to support");
+ }
+ return 0;
+}
+
+/**
+ * Internal preparation function. Allocates the DV flow structure,
+ * whose size is constant.
+ *
+ * @param[in] dev
+ * Pointer to the rte_eth_dev structure.
+ * @param[in] attr
+ * Pointer to the flow attributes.
+ * @param[in] items
+ * Pointer to the list of items.
+ * @param[in] actions
+ * Pointer to the list of actions.
+ * @param[out] error
+ * Pointer to the error structure.
+ *
+ * @return
+ * Pointer to mlx5_flow object on success,
+ * otherwise NULL and rte_errno is set.
+ */
+static struct mlx5_flow *
+flow_dv_prepare(struct rte_eth_dev *dev,
+		const struct rte_flow_attr *attr,
+ const struct rte_flow_item items[] __rte_unused,
+ const struct rte_flow_action actions[] __rte_unused,
+ struct rte_flow_error *error)
+{
+ uint32_t handle_idx = 0;
+ struct mlx5_flow *dev_flow;
+ struct mlx5_flow_handle *dev_handle;
+ struct mlx5_priv *priv = dev->data->dev_private;
+
+	/* Guard against overflowing the temporary device flow array. */
+ if (priv->flow_idx >= MLX5_NUM_MAX_DEV_FLOWS) {
+ rte_flow_error_set(error, ENOSPC,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not free temporary device flow");
+ return NULL;
+ }
+ dev_handle = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
+ &handle_idx);
+ if (!dev_handle) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "not enough memory to create flow handle");
+ return NULL;
+ }
+	/* No multi-thread support. */
+ dev_flow = &((struct mlx5_flow *)priv->inter_flows)[priv->flow_idx++];
+ dev_flow->handle = dev_handle;
+ dev_flow->handle_idx = handle_idx;
+	/*
+	 * Some old rdma-core releases check the length of the matching
+	 * parameter before continuing, and that length must not include
+	 * the misc4 param. If the flow provides misc4 matching, the
+	 * length is adjusted accordingly. Each param member is naturally
+	 * aligned on a 64B boundary.
+	 */
+ dev_flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param) -
+ MLX5_ST_SZ_BYTES(fte_match_set_misc4);
+	/*
+	 * The matching value must be cleared before use. It used to be
+	 * zeroed implicitly by the rte_*alloc API; the explicit memset
+	 * takes almost the same time.
+	 */
+ memset(dev_flow->dv.value.buf, 0, MLX5_ST_SZ_BYTES(fte_match_param));
+ dev_flow->ingress = attr->ingress;
+ dev_flow->dv.transfer = attr->transfer;
+ return dev_flow;
+}
+
+#ifdef RTE_LIBRTE_MLX5_DEBUG
+/**
+ * Sanity check for match mask and value. Similar to check_valid_spec() in
+ * the kernel driver. If an unmasked bit is set in the value, the check fails.
+ *
+ * @param match_mask
+ *   Pointer to the match mask buffer.
+ * @param match_value
+ *   Pointer to the match value buffer.
+ *
+ * @return
+ * 0 if valid, -EINVAL otherwise.
+ */
+static int
+flow_dv_check_valid_spec(void *match_mask, void *match_value)
+{
+ uint8_t *m = match_mask;
+ uint8_t *v = match_value;
+ unsigned int i;
+
+ for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
+ if (v[i] & ~m[i]) {
+ DRV_LOG(ERR,
+ "match_value differs from match_criteria"
+ " %p[%u] != %p[%u]",
+ match_value, i, match_mask, i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+#endif
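+
+/*
+ * Illustrative example: a mask byte of 0xf0 with a value byte of 0x0f
+ * fails the check above, since the low nibble is set in the value but
+ * not covered by the mask, and -EINVAL is returned.
+ */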
+
+/**
+ * Add match of ip_version.
+ *
+ * @param[in] group
+ * Flow group.
+ * @param[in] headers_v
+ * Values header pointer.
+ * @param[in] headers_m
+ * Masks header pointer.
+ * @param[in] ip_version
+ * The IP version to set.
+ */
+static inline void
+flow_dv_set_match_ip_version(uint32_t group,
+ void *headers_v,
+ void *headers_m,
+ uint8_t ip_version)
+{
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version,
+ ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, ip_version);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, 0);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype, 0);
+}
+
+/**
+ * Add Ethernet item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_eth(void *matcher, void *key,
+ const struct rte_flow_item *item, int inner,
+ uint32_t group)
+{
+ const struct rte_flow_item_eth *eth_m = item->mask;
+ const struct rte_flow_item_eth *eth_v = item->spec;
+ const struct rte_flow_item_eth nic_mask = {
+ .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
+ .type = RTE_BE16(0xffff),
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_v;
+ unsigned int i;
+
+ if (!eth_v)
+ return;
+ if (!eth_m)
+ eth_m = &nic_mask;
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
+	       &eth_m->dst, sizeof(eth_m->dst));
+ /* The value must be in the range of the mask. */
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
+ for (i = 0; i < sizeof(eth_m->dst); ++i)
+ l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
+	       &eth_m->src, sizeof(eth_m->src));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
+ /* The value must be in the range of the mask. */
+	for (i = 0; i < sizeof(eth_m->src); ++i)
+ l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
+ if (eth_v->type) {
+		/* When ethertype is present, set the mask for tagged VLAN. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ /* Set value for tagged VLAN if ethertype is 802.1Q. */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_VLAN) ||
+ eth_v->type == RTE_BE16(RTE_ETHER_TYPE_QINQ)) {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag,
+ 1);
+ /* Return here to avoid setting match on ethertype. */
+ return;
+ }
+ }
+ /*
+ * HW supports match on one Ethertype, the Ethertype following the last
+ * VLAN tag of the packet (see PRM).
+ * Set match on ethertype only if ETH header is not followed by VLAN.
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ * eCPRI over Ether layer will use type value 0xAEFE.
+ */
+ if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (eth_v->type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ eth_m->type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(eth_m->type));
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ ethertype);
+ *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
+ }
+}
+
+/**
+ * Add VLAN item to matcher and to the value.
+ *
+ * @param[in, out] dev_flow
+ * Flow descriptor.
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
+ void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_vlan *vlan_m = item->mask;
+ const struct rte_flow_item_vlan *vlan_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+ uint16_t tci_m;
+ uint16_t tci_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+		/*
+		 * This is a workaround: masks are not supported here and
+		 * have been pre-validated.
+		 */
+ if (vlan_v)
+ dev_flow->handle->vf_vlan.tag =
+ rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
+ }
+	/*
+	 * When a VLAN item exists in the flow, mark the packet as tagged,
+	 * even if no TCI is specified.
+	 */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
+ if (!vlan_v)
+ return;
+ if (!vlan_m)
+ vlan_m = &rte_flow_item_vlan_mask;
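+	/*
+	 * TCI layout is PCP(3) | DEI(1) | VID(12): MLX5_SET() truncates to
+	 * the field width, so first_vid takes the low 12 bits, the DEI/CFI
+	 * bit is at >>12 and the priority at >>13.
+	 */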
+ tci_m = rte_be_to_cpu_16(vlan_m->tci);
+ tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
+ /*
+ * HW is optimized for IPv4/IPv6. In such cases, avoid setting
+ * ethertype, and use ip_version field instead.
+ */
+ if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV4) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ } else if (vlan_v->inner_type == RTE_BE16(RTE_ETHER_TYPE_IPV6) &&
+ vlan_m->inner_type == 0xFFFF) {
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ } else {
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+ rte_be_to_cpu_16(vlan_m->inner_type &
+ vlan_v->inner_type));
+ }
+}
+
+/**
+ * Add IPV4 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] inner
+ * Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_ipv4(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const uint64_t item_flags,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
+ const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
+ const struct rte_flow_item_ipv4 nic_mask = {
+ .hdr = {
+ .src_addr = RTE_BE32(0xffffffff),
+ .dst_addr = RTE_BE32(0xffffffff),
+ .type_of_service = 0xff,
+ .next_proto_id = 0xff,
+ .time_to_live = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ char *l24_m;
+ char *l24_v;
+ uint8_t tos;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 4);
+ /*
+	 * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ if (!ipv4_v)
+ return;
+ if (!ipv4_m)
+ ipv4_m = &nic_mask;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
+ *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
+ tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
+ ipv4_m->hdr.type_of_service);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
+ ipv4_m->hdr.type_of_service >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv4_m->hdr.time_to_live);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv4_v->hdr.time_to_live & ipv4_m->hdr.time_to_live);
+}
+
+/**
+ * Add IPV6 item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] item_flags
+ * Bit-fields that holds the items detected until now.
+ * @param[in] inner
+ * Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule.
+ */
+static void
+flow_dv_translate_item_ipv6(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ const uint64_t item_flags,
+ int inner, uint32_t group)
+{
+ const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
+ const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
+ const struct rte_flow_item_ipv6 nic_mask = {
+ .hdr = {
+ .src_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .dst_addr =
+ "\xff\xff\xff\xff\xff\xff\xff\xff"
+ "\xff\xff\xff\xff\xff\xff\xff\xff",
+ .vtc_flow = RTE_BE32(0xffffffff),
+ .proto = 0xff,
+ .hop_limits = 0xff,
+ },
+ };
+ void *headers_m;
+ void *headers_v;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ char *l24_m;
+ char *l24_v;
+ uint32_t vtc_m;
+ uint32_t vtc_v;
+ int i;
+ int size;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ flow_dv_set_match_ip_version(group, headers_v, headers_m, 6);
+ /*
+	 * On outer header (which must contain L2), or inner header with L2,
+ * set cvlan_tag mask bit to mark this packet as untagged.
+ * This should be done even if item->spec is empty.
+ */
+ if (!inner || item_flags & MLX5_FLOW_LAYER_INNER_L2)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
+ if (!ipv6_v)
+ return;
+ if (!ipv6_m)
+ ipv6_m = &nic_mask;
+ size = sizeof(ipv6_m->hdr.dst_addr);
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
+ l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6);
+ memcpy(l24_m, ipv6_m->hdr.src_addr, size);
+ for (i = 0; i < size; ++i)
+ l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
+ /* TOS. */
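+	/*
+	 * vtc_flow layout is version(4) | traffic class(8) | flow label(20);
+	 * MLX5_SET() truncates to the field width, so >>20 leaves the ECN
+	 * bits and >>22 the DSCP bits of the traffic class.
+	 */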
+ vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
+ vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
+ /* Label. */
+ if (inner) {
+ MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
+ vtc_v);
+ } else {
+ MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
+ vtc_m);
+ MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
+ vtc_v);
+ }
+ /* Protocol. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
+ ipv6_m->hdr.proto);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+ ipv6_v->hdr.proto & ipv6_m->hdr.proto);
+ /* Hop limit. */
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ttl_hoplimit,
+ ipv6_m->hdr.hop_limits);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ttl_hoplimit,
+ ipv6_v->hdr.hop_limits & ipv6_m->hdr.hop_limits);
+}
+
+/**
+ * Add TCP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_tcp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_tcp *tcp_m = item->mask;
+ const struct rte_flow_item_tcp *tcp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
+ if (!tcp_v)
+ return;
+ if (!tcp_m)
+ tcp_m = &rte_flow_item_tcp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
+ rte_be_to_cpu_16(tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
+ rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
+ rte_be_to_cpu_16(tcp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
+ rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
+ tcp_m->hdr.tcp_flags);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
+ (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
+}
+
+/**
+ * Add UDP item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ * @param[in] inner
+ * Item is inner pattern.
+ */
+static void
+flow_dv_translate_item_udp(void *matcher, void *key,
+ const struct rte_flow_item *item,
+ int inner)
+{
+ const struct rte_flow_item_udp *udp_m = item->mask;
+ const struct rte_flow_item_udp *udp_v = item->spec;
+ void *headers_m;
+ void *headers_v;
+
+ if (inner) {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ inner_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
+ } else {
+ headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
+ outer_headers);
+ headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
+ if (!udp_v)
+ return;
+ if (!udp_m)
+ udp_m = &rte_flow_item_udp_mask;
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
+ rte_be_to_cpu_16(udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
+ rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
+ rte_be_to_cpu_16(udp_m->hdr.dst_port));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
+ rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
+}
+
+/**
+ * Add GRE optional Key item to matcher and to the value.
+ *
+ * @param[in, out] matcher
+ * Flow matcher.
+ * @param[in, out] key
+ * Flow matcher value.
+ * @param[in] item
+ * Flow pattern to translate.
+ */
+static void
+flow_dv_translate_item_gre_key(void *matcher, void *key,
+ const struct rte_flow_item *item)
+{
+ const rte_be32_t *key_m = item->mask;
+ const rte_be32_t *key_v = item->spec;
+ void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
+ void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
+ rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);