+static int
+ice_fdir_parse_action_qregion(struct ice_pf *pf,
+ struct rte_flow_error *error,
+ const struct rte_flow_action *act,
+ struct ice_fdir_filter_conf *filter)
+{
+ const struct rte_flow_action_rss *rss = act->conf;
+ uint32_t i;
+
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid action.");
+ return -rte_errno;
+ }
+
+ if (rss->queue_num <= 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Queue region size can't be 0 or 1.");
+ return -rte_errno;
+ }
+
+ /* check that the queue indexes of the region are continuous */
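+ /* e.g. queues {4, 5, 6, 7} form a valid region; {4, 6, 7, 8} do not */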
+ for (i = 0; i < rss->queue_num - 1; i++) {
+ if (rss->queue[i + 1] != rss->queue[i] + 1) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Discontinuous queue region");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->queue[rss->queue_num - 1] >= pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "Invalid queue region indexes.");
+ return -rte_errno;
+ }
+
+ if (!(rte_is_power_of_2(rss->queue_num) &&
+ (rss->queue_num <= ICE_FDIR_MAX_QREGION_SIZE))) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "The region size should be any of the following values:"
+ "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
+ "of queues do not exceed the VSI allocation.");
+ return -rte_errno;
+ }
+
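+ /* queue_num is a power of two here, so rte_fls_u32(queue_num) - 1
+ * equals log2(queue_num): an 8-queue region is encoded as 3.
+ */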
+ filter->input.q_index = rss->queue[0];
+ filter->input.q_region = rte_fls_u32(rss->queue_num) - 1;
+ filter->input.dest_ctl = ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QGROUP;
+
+ return 0;
+}
+
+static int
+ice_fdir_parse_action(struct ice_adapter *ad,
+ const struct rte_flow_action actions[],
+ struct rte_flow_error *error,
+ struct ice_fdir_filter_conf *filter)
+{
+ struct ice_pf *pf = &ad->pf;
+ const struct rte_flow_action_queue *act_q;
+ const struct rte_flow_action_mark *mark_spec = NULL;
+ const struct rte_flow_action_count *act_count;
+ uint32_t dest_num = 0;
+ uint32_t mark_num = 0;
+ uint32_t counter_num = 0;
+ int ret;
+
+ for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+ switch (actions->type) {
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ dest_num++;
+
+ act_q = actions->conf;
+ filter->input.q_index = act_q->index;
+ if (filter->input.q_index >=
+ pf->dev_data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions,
+ "Invalid queue for FDIR.");
+ return -rte_errno;
+ }
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_QINDEX;
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DROP_PKT;
+ break;
+ case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+ dest_num++;
+
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
+ break;
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ dest_num++;
+
+ ret = ice_fdir_parse_action_qregion(pf,
+ error, actions, filter);
+ if (ret)
+ return ret;
+ break;
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ mark_num++;
+ filter->mark_flag = 1;
+ mark_spec = actions->conf;
+ filter->input.fltr_id = mark_spec->id;
+ filter->input.fdid_prio = ICE_FXD_FLTR_QW1_FDID_PRI_ONE;
+ break;
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ counter_num++;
+
+ act_count = actions->conf;
+ filter->input.cnt_ena = ICE_FXD_FLTR_QW0_STAT_ENA_PKTS;
+ rte_memcpy(&filter->act_count, act_count,
+ sizeof(filter->act_count));
+
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Invalid action.");
+ return -rte_errno;
+ }
+ }
+
+ if (dest_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Unsupported action combination");
+ return -rte_errno;
+ }
+
+ if (mark_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many mark actions");
+ return -rte_errno;
+ }
+
+ if (counter_num >= 2) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Too many count actions");
+ return -rte_errno;
+ }
+
+ if (dest_num + mark_num + counter_num == 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Empty action");
+ return -rte_errno;
+ }
+
+ /* set the default action to PASSTHRU in the "mark/count only" case */
+ if (dest_num == 0)
+ filter->input.dest_ctl =
+ ICE_FLTR_PRGM_DESC_DEST_DIRECT_PKT_OTHER;
+
+ return 0;
+}
+
+static int
+ice_fdir_parse_pattern(struct ice_adapter *ad,
+ const struct rte_flow_item pattern[],
+ struct rte_flow_error *error,
+ struct ice_fdir_filter_conf *filter)
+{
+ const struct rte_flow_item *item = pattern;
+ enum rte_flow_item_type item_type;
+ enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+ enum rte_flow_item_type l4 = RTE_FLOW_ITEM_TYPE_END;
+ enum ice_fdir_tunnel_type tunnel_type = ICE_FDIR_TUNNEL_TYPE_NONE;
+ const struct rte_flow_item_raw *raw_spec, *raw_mask;
+ const struct rte_flow_item_eth *eth_spec, *eth_mask;
+ const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_last, *ipv4_mask;
+ const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+ const struct rte_flow_item_ipv6_frag_ext *ipv6_frag_spec,
+ *ipv6_frag_mask;
+ const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+ const struct rte_flow_item_udp *udp_spec, *udp_mask;
+ const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+ const struct rte_flow_item_vxlan *vxlan_spec, *vxlan_mask;
+ const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+ const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+ const struct rte_flow_item_esp *esp_spec, *esp_mask;
+ uint64_t input_set_i = ICE_INSET_NONE; /* only for tunnel inner */
+ uint64_t input_set_o = ICE_INSET_NONE; /* non-tunnel and tunnel outer */
+ uint64_t *input_set;
+ uint8_t flow_type = ICE_FLTR_PTYPE_NONF_NONE;
+ uint8_t ipv6_addr_mask[16] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+ };
+ uint32_t vtc_flow_cpu;
+ uint16_t ether_type;
+ enum rte_flow_item_type next_type;
+ bool is_outer = true;
+ struct ice_fdir_extra *p_ext_data;
+ struct ice_fdir_v4 *p_v4 = NULL;
+ struct ice_fdir_v6 *p_v6 = NULL;
+ struct ice_parser_result rslt;
+ struct ice_parser *psr;
+ uint8_t item_num = 0;
+
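+ /* first pass: detect the tunnel type, decide whether GTPU fields
+ * belong to the inner headers, and count the pattern items
+ */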
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
+ tunnel_type = ICE_FDIR_TUNNEL_TYPE_VXLAN;
+ /* To align with shared code behavior, save the GTPU outer
+ * fields in the inner struct.
+ */
+ if (item->type == RTE_FLOW_ITEM_TYPE_GTPU ||
+ item->type == RTE_FLOW_ITEM_TYPE_GTP_PSC) {
+ is_outer = false;
+ }
+ item_num++;
+ }
+
+ /* This loop parses the flow pattern and distinguishes non-tunnel
+ * from tunnel flows. input_set_i is used for the inner part.
+ */
+ for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+ item_type = item->type;
+
+ if (item->last && !(item_type == RTE_FLOW_ITEM_TYPE_IPV4 ||
+ item_type ==
+ RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Range not supported");
+ return -rte_errno;
+ }
+
+ input_set = (tunnel_type && !is_outer) ?
+ &input_set_i : &input_set_o;
+
+ switch (item_type) {
+ case RTE_FLOW_ITEM_TYPE_RAW: {
+ raw_spec = item->spec;
+ raw_mask = item->mask;
+
+ if (item_num != 1)
+ break;
+
+ /* both spec and mask are dereferenced below */
+ if (!raw_spec || !raw_mask || !raw_spec->pattern ||
+ !raw_mask->pattern)
+ return -rte_errno;
+
+ /* convert the raw spec & mask from hex strings to byte arrays */
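+ /* e.g. the string "1a2B" becomes the two bytes 0x1a 0x2b */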
+ unsigned char *tmp_spec =
+ (uint8_t *)(uintptr_t)raw_spec->pattern;
+ unsigned char *tmp_mask =
+ (uint8_t *)(uintptr_t)raw_mask->pattern;
+ uint16_t udp_port = 0;
+ uint16_t tmp_val = 0;
+ uint8_t pkt_len = 0;
+ uint8_t tmp = 0;
+ int i, j;
+
+ pkt_len = strlen((char *)(uintptr_t)raw_spec->pattern);
+ if (strlen((char *)(uintptr_t)raw_mask->pattern) !=
+ pkt_len)
+ return -rte_errno;
+
+ for (i = 0, j = 0; i < pkt_len; i += 2, j++) {
+ tmp = tmp_spec[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_spec[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_spec[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_spec[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_spec[j] = tmp_val + tmp - '0';
+
+ tmp = tmp_mask[i];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_val = tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_val = tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_val = tmp - '0';
+
+ tmp_val *= 16;
+ tmp = tmp_mask[i + 1];
+ if (tmp >= 'a' && tmp <= 'f')
+ tmp_mask[j] = tmp_val + tmp - 'a' + 10;
+ if (tmp >= 'A' && tmp <= 'F')
+ tmp_mask[j] = tmp_val + tmp - 'A' + 10;
+ if (tmp >= '0' && tmp <= '9')
+ tmp_mask[j] = tmp_val + tmp - '0';
+ }
+
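+ /* each pair of hex characters was packed into one byte above */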
+ pkt_len /= 2;
+
+ if (ice_parser_create(&ad->hw, &psr))
+ return -rte_errno;
+ if (ice_get_open_tunnel_port(&ad->hw, TNL_VXLAN,
+ &udp_port))
+ ice_parser_vxlan_tunnel_set(psr, udp_port,
+ true);
+ if (ice_parser_run(psr, tmp_spec, pkt_len, &rslt)) {
+ ice_parser_destroy(psr);
+ return -rte_errno;
+ }
+ ice_parser_destroy(psr);
+
+ filter->prof = (struct ice_parser_profile *)
+ ice_malloc(&ad->hw, sizeof(*filter->prof));
+ if (!filter->prof)
+ return -ENOMEM;
+
+ if (ice_parser_profile_init(&rslt, tmp_spec, tmp_mask,
+ pkt_len, ICE_BLK_FD, true, filter->prof))
+ return -rte_errno;
+
+ u8 *pkt_buf = (u8 *)ice_malloc(&ad->hw, pkt_len + 1);
+ if (!pkt_buf)
+ return -ENOMEM;
+ rte_memcpy(pkt_buf, tmp_spec, pkt_len);
+ filter->pkt_buf = pkt_buf;
+
+ filter->pkt_len = pkt_len;
+
+ filter->parser_ena = true;
+
+ break;
+ }
+
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ flow_type = ICE_FLTR_PTYPE_NON_IP_L2;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
+
+ if (!(eth_spec && eth_mask))
+ break;
+
+ if (!rte_is_zero_ether_addr(ð_mask->dst))
+ *input_set |= ICE_INSET_DMAC;
+ if (!rte_is_zero_ether_addr(ð_mask->src))
+ *input_set |= ICE_INSET_SMAC;
+
+ next_type = (item + 1)->type;
+ /* the ether_type field is only matched for pure L2 flows
+ * (ICE_FLTR_PTYPE_NON_IP_L2), i.e. when ETH is the last item
+ */
+ if (eth_mask->type == RTE_BE16(0xffff) &&
+ next_type == RTE_FLOW_ITEM_TYPE_END) {
+ *input_set |= ICE_INSET_ETHERTYPE;
+ ether_type = rte_be_to_cpu_16(eth_spec->type);
+
+ if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+ ether_type == RTE_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Unsupported ether_type.");
+ return -rte_errno;
+ }
+ }
+
+ p_ext_data = (tunnel_type && is_outer) ?
+ &filter->input.ext_data_outer :
+ &filter->input.ext_data;
+ rte_memcpy(&p_ext_data->src_mac,
+ ð_spec->src, RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&p_ext_data->dst_mac,
+ ð_spec->dst, RTE_ETHER_ADDR_LEN);
+ rte_memcpy(&p_ext_data->ether_type,
+ ð_spec->type, sizeof(eth_spec->type));
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_OTHER;
+ l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_spec = item->spec;
+ ipv4_last = item->last;
+ ipv4_mask = item->mask;
+ p_v4 = (tunnel_type && is_outer) ?
+ &filter->input.ip_outer.v4 :
+ &filter->input.ip.v4;
+
+ if (!(ipv4_spec && ipv4_mask))
+ break;
+
+ /* Check IPv4 mask and update input set */
+ if (ipv4_mask->hdr.version_ihl ||
+ ipv4_mask->hdr.total_length ||
+ ipv4_mask->hdr.hdr_checksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ if (ipv4_last &&
+ (ipv4_last->hdr.version_ihl ||
+ ipv4_last->hdr.type_of_service ||
+ ipv4_last->hdr.time_to_live ||
+ ipv4_last->hdr.total_length ||
+ ipv4_last->hdr.next_proto_id ||
+ ipv4_last->hdr.hdr_checksum ||
+ ipv4_last->hdr.src_addr ||
+ ipv4_last->hdr.dst_addr)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv4 last.");
+ return -rte_errno;
+ }
+
+ if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
+ *input_set |= ICE_INSET_IPV4_DST;
+ if (ipv4_mask->hdr.src_addr == UINT32_MAX)
+ *input_set |= ICE_INSET_IPV4_SRC;
+ if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
+ *input_set |= ICE_INSET_IPV4_TTL;
+ if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
+ *input_set |= ICE_INSET_IPV4_PROTO;
+ if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
+ *input_set |= ICE_INSET_IPV4_TOS;
+
+ p_v4->dst_ip = ipv4_spec->hdr.dst_addr;
+ p_v4->src_ip = ipv4_spec->hdr.src_addr;
+ p_v4->ttl = ipv4_spec->hdr.time_to_live;
+ p_v4->proto = ipv4_spec->hdr.next_proto_id;
+ p_v4->tos = ipv4_spec->hdr.type_of_service;
+
+ /* fragmented IPv4: the MF flag (0x2000) is set in
+ * both spec and mask
+ */
+ if (ipv4_spec->hdr.fragment_offset ==
+ rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG) &&
+ ipv4_mask->hdr.fragment_offset ==
+ rte_cpu_to_be_16(RTE_IPV4_HDR_MF_FLAG)) {
+ /* all IPv4 fragment packets share the same
+ * ethertype; if the spec and mask are valid,
+ * add the ethertype to the input set.
+ */
+ flow_type = ICE_FLTR_PTYPE_FRAG_IPV4;
+ *input_set |= ICE_INSET_ETHERTYPE;
+ input_set_o |= ICE_INSET_ETHERTYPE;
+ } else if (ipv4_mask->hdr.packet_id == UINT16_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv4 mask.");
+ return -rte_errno;
+ }
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_OTHER;
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+ ipv6_spec = item->spec;
+ ipv6_mask = item->mask;
+ p_v6 = (tunnel_type && is_outer) ?
+ &filter->input.ip_outer.v6 :
+ &filter->input.ip.v6;
+
+ if (!(ipv6_spec && ipv6_mask))
+ break;
+
+ /* Check IPv6 mask and update input set */
+ if (ipv6_mask->hdr.payload_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid IPv6 mask");
+ return -rte_errno;
+ }
+
+ if (!memcmp(ipv6_mask->hdr.src_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.src_addr)))
+ *input_set |= ICE_INSET_IPV6_SRC;
+ if (!memcmp(ipv6_mask->hdr.dst_addr, ipv6_addr_mask,
+ RTE_DIM(ipv6_mask->hdr.dst_addr)))
+ *input_set |= ICE_INSET_IPV6_DST;
+
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+ == rte_cpu_to_be_32(ICE_IPV6_TC_MASK))
+ *input_set |= ICE_INSET_IPV6_TC;
+ if (ipv6_mask->hdr.proto == UINT8_MAX)
+ *input_set |= ICE_INSET_IPV6_NEXT_HDR;
+ if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
+ *input_set |= ICE_INSET_IPV6_HOP_LIMIT;
+
+ rte_memcpy(&p_v6->dst_ip, ipv6_spec->hdr.dst_addr, 16);
+ rte_memcpy(&p_v6->src_ip, ipv6_spec->hdr.src_addr, 16);
+ vtc_flow_cpu = rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
+ p_v6->tc = (uint8_t)(vtc_flow_cpu >> ICE_FDIR_IPV6_TC_OFFSET);
+ p_v6->proto = ipv6_spec->hdr.proto;
+ p_v6->hlim = ipv6_spec->hdr.hop_limits;
+ break;
+ case RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT:
+ l3 = RTE_FLOW_ITEM_TYPE_IPV6_FRAG_EXT;
+ flow_type = ICE_FLTR_PTYPE_FRAG_IPV6;
+ ipv6_frag_spec = item->spec;
+ ipv6_frag_mask = item->mask;
+
+ if (!(ipv6_frag_spec && ipv6_frag_mask))
+ break;
+
+ /* fragmented IPv6: the M flag (0x1) of frag_data is
+ * set in both spec and mask
+ */
+ if (ipv6_frag_spec->hdr.frag_data ==
+ rte_cpu_to_be_16(1) &&
+ ipv6_frag_mask->hdr.frag_data ==
+ rte_cpu_to_be_16(1)) {
+ /* all IPv6 fragment packets share the same
+ * ethertype; if the spec and mask are valid,
+ * add the ethertype to the input set.
+ */
+ *input_set |= ICE_INSET_ETHERTYPE;
+ input_set_o |= ICE_INSET_ETHERTYPE;
+ } else if (ipv6_frag_mask->hdr.id == UINT32_MAX) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Invalid IPv6 mask.");
+ return -rte_errno;
+ }
+
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_TCP:
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_TCP;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_TCP;
+
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+
+ if (!(tcp_spec && tcp_mask))
+ break;
+
+ /* Check TCP mask and update input set */
+ if (tcp_mask->hdr.sent_seq ||
+ tcp_mask->hdr.recv_ack ||
+ tcp_mask->hdr.data_off ||
+ tcp_mask->hdr.tcp_flags ||
+ tcp_mask->hdr.rx_win ||
+ tcp_mask->hdr.cksum ||
+ tcp_mask->hdr.tcp_urp) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid TCP mask");
+ return -rte_errno;
+ }
+
+ if (tcp_mask->hdr.src_port == UINT16_MAX)
+ *input_set |= ICE_INSET_TCP_SRC_PORT;
+ if (tcp_mask->hdr.dst_port == UINT16_MAX)
+ *input_set |= ICE_INSET_TCP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ assert(p_v4);
+ p_v4->dst_port = tcp_spec->hdr.dst_port;
+ p_v4->src_port = tcp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ assert(p_v6);
+ p_v6->dst_port = tcp_spec->hdr.dst_port;
+ p_v6->src_port = tcp_spec->hdr.src_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_UDP:
+ l4 = RTE_FLOW_ITEM_TYPE_UDP;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_UDP;
+
+ udp_spec = item->spec;
+ udp_mask = item->mask;
+
+ if (!(udp_spec && udp_mask))
+ break;
+
+ /* Check UDP mask and update input set */
+ if (udp_mask->hdr.dgram_len ||
+ udp_mask->hdr.dgram_cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (udp_mask->hdr.src_port == UINT16_MAX)
+ *input_set |= ICE_INSET_UDP_SRC_PORT;
+ if (udp_mask->hdr.dst_port == UINT16_MAX)
+ *input_set |= ICE_INSET_UDP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ assert(p_v4);
+ p_v4->dst_port = udp_spec->hdr.dst_port;
+ p_v4->src_port = udp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ assert(p_v6);
+ p_v6->src_port = udp_spec->hdr.src_port;
+ p_v6->dst_port = udp_spec->hdr.dst_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_SCTP:
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_SCTP;
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_SCTP;
+
+ sctp_spec = item->spec;
+ sctp_mask = item->mask;
+
+ if (!(sctp_spec && sctp_mask))
+ break;
+
+ /* Check SCTP mask and update input set */
+ if (sctp_mask->hdr.cksum) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid UDP mask");
+ return -rte_errno;
+ }
+
+ if (sctp_mask->hdr.src_port == UINT16_MAX)
+ *input_set |= ICE_INSET_SCTP_SRC_PORT;
+ if (sctp_mask->hdr.dst_port == UINT16_MAX)
+ *input_set |= ICE_INSET_SCTP_DST_PORT;
+
+ /* Get filter info */
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
+ assert(p_v4);
+ p_v4->dst_port = sctp_spec->hdr.dst_port;
+ p_v4->src_port = sctp_spec->hdr.src_port;
+ } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
+ assert(p_v6);
+ p_v6->dst_port = sctp_spec->hdr.dst_port;
+ p_v6->src_port = sctp_spec->hdr.src_port;
+ }
+ break;
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ break;
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ l3 = RTE_FLOW_ITEM_TYPE_END;
+ vxlan_spec = item->spec;
+ vxlan_mask = item->mask;
+ is_outer = false;
+
+ if (!(vxlan_spec && vxlan_mask))
+ break;
+
+ if (vxlan_mask->hdr.vx_flags) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid vxlan field");
+ return -rte_errno;
+ }
+
+ if (vxlan_mask->hdr.vx_vni)
+ *input_set |= ICE_INSET_VXLAN_VNI;
+
+ filter->input.vxlan_data.vni = vxlan_spec->hdr.vx_vni;
+
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTPU:
+ l3 = RTE_FLOW_ITEM_TYPE_END;
+ tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU;
+ gtp_spec = item->spec;
+ gtp_mask = item->mask;
+
+ if (!(gtp_spec && gtp_mask))
+ break;
+
+ if (gtp_mask->v_pt_rsv_flags ||
+ gtp_mask->msg_type ||
+ gtp_mask->msg_len) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid GTP mask");
+ return -rte_errno;
+ }
+
+ if (gtp_mask->teid == UINT32_MAX)
+ input_set_o |= ICE_INSET_GTPU_TEID;
+
+ filter->input.gtpu_data.teid = gtp_spec->teid;
+ break;
+ case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+ tunnel_type = ICE_FDIR_TUNNEL_TYPE_GTPU_EH;
+ gtp_psc_spec = item->spec;
+ gtp_psc_mask = item->mask;
+
+ if (!(gtp_psc_spec && gtp_psc_mask))
+ break;
+
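+ /* QFI is a 6-bit field, so 0x3F is the fully-masked value */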
+ if (gtp_psc_mask->hdr.qfi == 0x3F)
+ input_set_o |= ICE_INSET_GTPU_QFI;
+
+ filter->input.gtpu_data.qfi =
+ gtp_psc_spec->hdr.qfi;
+ break;
+ case RTE_FLOW_ITEM_TYPE_ESP:
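+ /* ESP over UDP is NAT traversal (NAT-T); plain ESP
+ * rides directly on IP, hence the check on l4
+ */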
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_NAT_T_ESP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
+ l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_NAT_T_ESP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV4 &&
+ l4 == RTE_FLOW_ITEM_TYPE_END)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_ESP;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6 &&
+ l4 == RTE_FLOW_ITEM_TYPE_END)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_ESP;
+
+ esp_spec = item->spec;
+ esp_mask = item->mask;
+
+ if (!(esp_spec && esp_mask))
+ break;
+
+ if (esp_mask->hdr.spi == UINT32_MAX) {
+ if (l4 == RTE_FLOW_ITEM_TYPE_UDP)
+ *input_set |= ICE_INSET_NAT_T_ESP_SPI;
+ else
+ *input_set |= ICE_INSET_ESP_SPI;
+ }
+
+ if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+ filter->input.ip.v4.sec_parm_idx =
+ esp_spec->hdr.spi;
+ else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+ filter->input.ip.v6.sec_parm_idx =
+ esp_spec->hdr.spi;
+ break;
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "Invalid pattern item.");
+ return -rte_errno;
+ }
+ }
+
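+ /* The loop above left a plain UDP/TCP/SCTP/OTHER flow type even for
+ * tunnel flows; remap it to the tunnel-specific type here.
+ */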
+ if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_GTPU_EH;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_GTPU_EH &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV6_GTPU_EH;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_UDP;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_TCP;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_SCTP;
+ else if (tunnel_type == ICE_FDIR_TUNNEL_TYPE_VXLAN &&
+ flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
+ flow_type = ICE_FLTR_PTYPE_NONF_IPV4_UDP_VXLAN_IPV4_OTHER;
+
+ filter->tunnel_type = tunnel_type;
+ filter->input.flow_type = flow_type;
+ filter->input_set_o = input_set_o;
+ filter->input_set_i = input_set_i;
+
+ return 0;
+}
+
+static int
+ice_fdir_parse(struct ice_adapter *ad,
+ struct ice_pattern_match_item *array,
+ uint32_t array_len,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ uint32_t priority,
+ void **meta,
+ struct rte_flow_error *error)
+{
+ struct ice_pf *pf = &ad->pf;
+ struct ice_fdir_filter_conf *filter = &pf->fdir.conf;
+ struct ice_pattern_match_item *item = NULL;
+ uint64_t input_set;
+ bool raw = false;
+ int ret;
+
+ memset(filter, 0, sizeof(*filter));
+
+ /* ice_search_pattern_match_item() allocates the returned item,
+ * so reject unsupported priorities before searching
+ */
+ if (!ad->devargs.pipe_mode_support && priority >= 1)
+ return -rte_errno;
+
+ item = ice_search_pattern_match_item(ad, pattern, array, array_len,
+ error);
+ if (!item)
+ return -rte_errno;
+
+ ret = ice_fdir_parse_pattern(ad, pattern, error, filter);
+ if (ret)
+ goto error;
+
+ if (item->pattern_list[0] == RTE_FLOW_ITEM_TYPE_RAW)
+ raw = true;
+
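+ /* a raw pattern leaves both input sets empty, so invert the
+ * combined set to let the emptiness check below pass
+ */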
+ input_set = filter->input_set_o | filter->input_set_i;
+ input_set = raw ? ~input_set : input_set;
+
+ if (!input_set || filter->input_set_o &
+ ~(item->input_set_mask_o | ICE_INSET_ETHERTYPE) ||
+ filter->input_set_i & ~item->input_set_mask_i) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
+ pattern,
+ "Invalid input set");
+ ret = -rte_errno;
+ goto error;
+ }
+
+ ret = ice_fdir_parse_action(ad, actions, error, filter);
+ if (ret)
+ goto error;
+
+ if (meta)
+ *meta = filter;
+
+ rte_free(item);
+ return ret;
+error:
+ rte_free(filter->prof);
+ rte_free(filter->pkt_buf);
+ rte_free(item);
+ return ret;
+}
+
+static struct ice_flow_parser ice_fdir_parser = {
+ .engine = &ice_fdir_engine,
+ .array = ice_fdir_pattern_list,
+ .array_len = RTE_DIM(ice_fdir_pattern_list),
+ .parse_pattern_action = ice_fdir_parse,
+ .stage = ICE_FLOW_STAGE_DISTRIBUTOR,
+};
+