diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index afdc116eea..8cba6eb7b1 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -26,7 +26,8 @@
 #include "ice_dcf_ethdev.h"
 
 
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE 7
+#define MAX_INPUT_SET_BYTE 32
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
 #define ICE_IPV4_PROTO_NVGRE	0x002F
@@ -138,7 +139,42 @@ struct sw_meta {
 
 static struct ice_flow_parser ice_switch_dist_parser_os;
 static struct ice_flow_parser ice_switch_dist_parser_comms;
-static struct ice_flow_parser ice_switch_perm_parser;
+static struct ice_flow_parser ice_switch_perm_parser_os;
+static struct ice_flow_parser ice_switch_perm_parser_comms;
+
+static struct
+ice_pattern_match_item ice_switch_pattern_dist_os[] = {
+	{pattern_ethertype,
+			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
+	{pattern_ethertype_vlan,
+			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_eth_arp,
+			ICE_INSET_NONE, ICE_INSET_NONE},
+	{pattern_eth_ipv4,
+			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp,
+			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_tcp,
+			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv6,
+			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
+	{pattern_eth_ipv6_udp,
+			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
+	{pattern_eth_ipv6_tcp,
+			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
+			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
+			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
+			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_nvgre_eth_ipv4,
+			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
+	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
+			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
+	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
+			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
+};
 
 static struct
 ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
@@ -146,6 +182,8 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_eth_arp,
+			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
@@ -227,7 +265,7 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 };
 static struct
-ice_pattern_match_item ice_switch_pattern_dist_os[] = {
+ice_pattern_match_item ice_switch_pattern_perm_os[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
@@ -247,25 +285,27 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = {
 	{pattern_eth_ipv6_tcp,
 			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
-			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
-			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
-			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4,
-			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
-			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE},
 	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
-			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
+			ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE},
 };
 
 static struct
-ice_pattern_match_item ice_switch_pattern_perm[] = {
+ice_pattern_match_item ice_switch_pattern_perm_comms[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_eth_arp,
+			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
@@ -472,6 +512,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t input_set_byte = 0;
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
@@ -479,10 +520,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	bool profile_rule = 0;
 	bool nvgre_valid = 0;
 	bool vxlan_valid = 0;
-	bool ipv6_valiad = 0;
-	bool ipv4_valiad = 0;
-	bool udp_valiad = 0;
-	bool tcp_valiad = 0;
+	bool ipv6_valid = 0;
+	bool ipv4_valid = 0;
+	bool udp_valid = 0;
+	bool tcp_valid = 0;
 	uint16_t j, t = 0;
 
 	for (item = pattern; item->type !=
@@ -541,6 +582,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 						i = 1;
+						input_set_byte++;
 					}
 					if (eth_mask->dst.addr_bytes[j]) {
 						h->dst_addr[j] =
@@ -548,6 +590,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						m->dst_addr[j] =
 						eth_mask->dst.addr_bytes[j];
 						i = 1;
+						input_set_byte++;
 					}
 				}
 				if (i)
@@ -558,6 +601,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					eth_spec->type;
 					list[t].m_u.ethertype.ethtype_id =
 					eth_mask->type;
+					input_set_byte += 2;
 					t++;
 				}
 			}
@@ -566,7 +610,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		case RTE_FLOW_ITEM_TYPE_IPV4:
 			ipv4_spec = item->spec;
 			ipv4_mask = item->mask;
-			ipv4_valiad = 1;
+			ipv4_valid = 1;
 			if (ipv4_spec && ipv4_mask) {
 				/* Check IPv4 mask and update input set */
 				if (ipv4_mask->hdr.version_ihl ||
@@ -617,24 +661,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.src_addr;
 					list[t].m_u.ipv4_hdr.src_addr =
 					ipv4_mask->hdr.src_addr;
+					input_set_byte += 2;
 				}
 				if (ipv4_mask->hdr.dst_addr) {
 					list[t].h_u.ipv4_hdr.dst_addr =
 					ipv4_spec->hdr.dst_addr;
 					list[t].m_u.ipv4_hdr.dst_addr =
 					ipv4_mask->hdr.dst_addr;
+					input_set_byte += 2;
 				}
 				if (ipv4_mask->hdr.time_to_live) {
 					list[t].h_u.ipv4_hdr.time_to_live =
 					ipv4_spec->hdr.time_to_live;
 					list[t].m_u.ipv4_hdr.time_to_live =
 					ipv4_mask->hdr.time_to_live;
+					input_set_byte++;
 				}
 				if (ipv4_mask->hdr.next_proto_id) {
 					list[t].h_u.ipv4_hdr.protocol =
 					ipv4_spec->hdr.next_proto_id;
 					list[t].m_u.ipv4_hdr.protocol =
 					ipv4_mask->hdr.next_proto_id;
+					input_set_byte++;
 				}
 				if ((ipv4_spec->hdr.next_proto_id &
 					ipv4_mask->hdr.next_proto_id) ==
@@ -645,6 +693,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.type_of_service;
 					list[t].m_u.ipv4_hdr.tos =
 					ipv4_mask->hdr.type_of_service;
+					input_set_byte++;
 				}
 				t++;
 			}
@@ -653,7 +702,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		case RTE_FLOW_ITEM_TYPE_IPV6:
 			ipv6_spec = item->spec;
 			ipv6_mask = item->mask;
-			ipv6_valiad = 1;
+			ipv6_valid = 1;
 			if (ipv6_spec && ipv6_mask) {
 				if (ipv6_mask->hdr.payload_len) {
 					rte_flow_error_set(error, EINVAL,
@@ -722,12 +771,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.src_addr[j];
 						s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
+						input_set_byte++;
 					}
 					if (ipv6_mask->hdr.dst_addr[j]) {
 						f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 						s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
+						input_set_byte++;
 					}
 				}
 				if (ipv6_mask->hdr.proto) {
@@ -735,12 +786,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv6_spec->hdr.proto;
 					s->next_hdr =
 					ipv6_mask->hdr.proto;
+					input_set_byte++;
 				}
 				if (ipv6_mask->hdr.hop_limits) {
 					f->hop_limit =
 					ipv6_spec->hdr.hop_limits;
 					s->hop_limit =
 					ipv6_mask->hdr.hop_limits;
+					input_set_byte++;
 				}
 				if (ipv6_mask->hdr.vtc_flow &
 						rte_cpu_to_be_32
@@ -758,6 +811,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						RTE_IPV6_HDR_TC_MASK) >>
 						RTE_IPV6_HDR_TC_SHIFT;
 					s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+					input_set_byte += 4;
 				}
 				t++;
 			}
@@ -766,7 +820,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 		case RTE_FLOW_ITEM_TYPE_UDP:
 			udp_spec = item->spec;
 			udp_mask = item->mask;
-			udp_valiad = 1;
+			udp_valid = 1;
 			if (udp_spec && udp_mask) {
 				/* Check UDP mask and update input set*/
 				if (udp_mask->hdr.dgram_len ||
@@ -803,21 +857,23 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					udp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 					udp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (udp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 					udp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 					udp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
-						t++;
+				t++;
 			}
 			break;
 
 		case RTE_FLOW_ITEM_TYPE_TCP:
 			tcp_spec = item->spec;
 			tcp_mask = item->mask;
-			tcp_valiad = 1;
+			tcp_valid = 1;
 			if (tcp_spec && tcp_mask) {
 				/* Check TCP mask and update input set */
 				if (tcp_mask->hdr.sent_seq ||
@@ -855,12 +911,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					tcp_spec->hdr.src_port;
 					list[t].m_u.l4_hdr.src_port =
 					tcp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (tcp_mask->hdr.dst_port) {
 					list[t].h_u.l4_hdr.dst_port =
 					tcp_spec->hdr.dst_port;
 					list[t].m_u.l4_hdr.dst_port =
 					tcp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -900,12 +958,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					sctp_spec->hdr.src_port;
 					list[t].m_u.sctp_hdr.src_port =
 					sctp_mask->hdr.src_port;
+					input_set_byte += 2;
 				}
 				if (sctp_mask->hdr.dst_port) {
 					list[t].h_u.sctp_hdr.dst_port =
 					sctp_spec->hdr.dst_port;
 					list[t].m_u.sctp_hdr.dst_port =
 					sctp_mask->hdr.dst_port;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -943,6 +1003,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					vxlan_mask->vni[0];
 					input_set |=
 						ICE_INSET_TUN_VXLAN_VNI;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -980,6 +1041,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					nvgre_mask->tni[0];
 					input_set |=
 						ICE_INSET_TUN_NVGRE_TNI;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -1008,6 +1070,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.vlan =
 						vlan_mask->tci;
 					input_set |= ICE_INSET_VLAN_OUTER;
+					input_set_byte += 2;
 				}
 				if (vlan_mask->inner_type) {
 					list[t].h_u.vlan_hdr.type =
@@ -1015,6 +1078,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.vlan_hdr.type =
 						vlan_mask->inner_type;
 					input_set |= ICE_INSET_ETHERTYPE;
+					input_set_byte += 2;
 				}
 				t++;
 			}
@@ -1055,6 +1119,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.pppoe_hdr.session_id =
 						pppoe_mask->session_id;
 					input_set |= ICE_INSET_PPPOE_SESSION;
+					input_set_byte += 2;
 				}
 				t++;
 				pppoe_elem_valid = 1;
@@ -1087,7 +1152,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					list[t].m_u.pppoe_hdr.ppp_prot_id =
 						pppoe_proto_mask->proto_id;
 					input_set |= ICE_INSET_PPPOE_PROTO;
-
+					input_set_byte += 2;
 					pppoe_prot_valid = 1;
 				}
 				if ((pppoe_proto_mask->proto_id &
@@ -1126,16 +1191,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			if (!esp_spec && !esp_mask && !input_set) {
 				profile_rule = 1;
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type =
 						ICE_SW_TUN_PROFID_IPV6_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type =
 						ICE_SW_TUN_PROFID_IPV6_ESP;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (esp_spec && esp_mask &&
 						esp_mask->hdr.spi){
-				if (udp_valiad)
+				if (udp_valid)
 					list[t].type = ICE_NAT_T;
 				else
 					list[t].type = ICE_ESP;
@@ -1144,17 +1209,18 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.esp_hdr.spi =
 					esp_mask->hdr.spi;
 				input_set |= ICE_INSET_ESP_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
-				else if (ipv4_valiad && udp_valiad)
+				else if (ipv4_valid && udp_valid)
 					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type = ICE_SW_TUN_IPV6_ESP;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type = ICE_SW_TUN_IPV4_ESP;
 			}
 			break;
@@ -1185,12 +1251,12 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			if (!ah_spec && !ah_mask && !input_set) {
 				profile_rule = 1;
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type =
 						ICE_SW_TUN_PROFID_IPV6_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type =
 						ICE_SW_TUN_PROFID_IPV6_AH;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (ah_spec && ah_mask &&
 						ah_mask->spi){
@@ -1200,15 +1266,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.ah_hdr.spi =
 					ah_mask->spi;
 				input_set |= ICE_INSET_AH_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (udp_valiad)
+				if (udp_valid)
 					return 0;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type = ICE_SW_TUN_IPV6_AH;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type = ICE_SW_TUN_IPV4_AH;
 			}
 			break;
@@ -1226,10 +1293,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 
 			if (!l2tp_spec && !l2tp_mask && !input_set) {
-				if (ipv6_valiad)
+				if (ipv6_valid)
 					*tun_type =
 						ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (l2tp_spec && l2tp_mask &&
 						l2tp_mask->session_id){
@@ -1239,14 +1306,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.l2tpv3_sess_hdr.session_id =
 					l2tp_mask->session_id;
 				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (ipv6_valiad)
+				if (ipv6_valid)
 					*tun_type =
 						ICE_SW_TUN_IPV6_L2TPV3;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type =
 						ICE_SW_TUN_IPV4_L2TPV3;
 			}
@@ -1280,7 +1348,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			if (pfcp_mask->s_field &&
 				pfcp_spec->s_field == 0x01 &&
-				ipv6_valiad)
+				ipv6_valid)
 				*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
 			else if (pfcp_mask->s_field &&
@@ -1289,7 +1357,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
 			else if (pfcp_mask->s_field &&
 				!pfcp_spec->s_field &&
-				ipv6_valiad)
+				ipv6_valid)
 				*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
 			else if (pfcp_mask->s_field &&
@@ -1313,17 +1381,17 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	}
 
 	if (pppoe_patt_valid && !pppoe_prot_valid) {
-		if (ipv6_valiad && udp_valiad)
+		if (ipv6_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
-		else if (ipv6_valiad && tcp_valiad)
+		else if (ipv6_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
-		else if (ipv4_valiad && udp_valiad)
+		else if (ipv4_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
-		else if (ipv4_valiad && tcp_valiad)
+		else if (ipv4_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
-		else if (ipv6_valiad)
+		else if (ipv6_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
-		else if (ipv4_valiad)
+		else if (ipv4_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
 		else
 			*tun_type = ICE_SW_TUN_PPPOE;
@@ -1334,16 +1402,24 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			*tun_type = ICE_SW_TUN_VXLAN;
 		else if (nvgre_valid)
 			*tun_type = ICE_SW_TUN_NVGRE;
-		else if (ipv4_valiad && tcp_valiad)
+		else if (ipv4_valid && tcp_valid)
 			*tun_type = ICE_SW_IPV4_TCP;
-		else if (ipv4_valiad && udp_valiad)
+		else if (ipv4_valid && udp_valid)
 			*tun_type = ICE_SW_IPV4_UDP;
-		else if (ipv6_valiad && tcp_valiad)
+		else if (ipv6_valid && tcp_valid)
 			*tun_type = ICE_SW_IPV6_TCP;
-		else if (ipv6_valiad && udp_valiad)
+		else if (ipv6_valid && udp_valid)
 			*tun_type = ICE_SW_IPV6_UDP;
 	}
 
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return -ENOTSUP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
@@ -1368,17 +1444,32 @@ ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
 		case RTE_FLOW_ACTION_TYPE_VF:
 			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
 			act_vf = action->conf;
+
+			if (act_vf->id >= ad->real_hw.num_vfs &&
+				!act_vf->original) {
+				rte_flow_error_set(error,
+					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					actions,
+					"Invalid vf id");
+				return -rte_errno;
+			}
+
 			if (act_vf->original)
 				rule_info->sw_act.vsi_handle =
 					ad->real_hw.avf.bus.func;
 			else
 				rule_info->sw_act.vsi_handle = act_vf->id;
 			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
+			break;
+
 		default:
 			rte_flow_error_set(error,
 				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				actions,
-				"Invalid action type or queue number");
+				"Invalid action type");
 			return -rte_errno;
 		}
 	}
@@ -1430,11 +1521,11 @@ ice_switch_parse_action(struct ice_pf *pf,
 			if ((act_qgrop->queue[0] +
 				act_qgrop->queue_num) >
 				dev->data->nb_rx_queues)
-				goto error;
+				goto error1;
 			for (i = 0; i < act_qgrop->queue_num - 1; i++)
 				if (act_qgrop->queue[i + 1] !=
 					act_qgrop->queue[i] + 1)
-					goto error;
+					goto error2;
 			rule_info->sw_act.qgrp_size =
 				act_qgrop->queue_num;
 			break;
@@ -1474,6 +1565,20 @@ error:
 		actions,
 		"Invalid action type or queue number");
 	return -rte_errno;
+
+error1:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Invalid queue region indexes");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Discontinuous queue region");
+	return -rte_errno;
 }
 
 static int
@@ -1617,12 +1722,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	rule_info.tun_type = tun_type;
 
 	ret = ice_switch_check_action(actions, error);
-	if (ret) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-			"Invalid input action number");
+	if (ret)
 		goto error;
-	}
 
 	if (ad->hw.dcf_enabled)
 		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
@@ -1630,12 +1731,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	else
 		ret = ice_switch_parse_action(pf, actions, error, &rule_info);
-	if (ret) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-			"Invalid input action");
+	if (ret)
 		goto error;
-	}
 
 	if (meta) {
 		*meta = sw_meta_ptr;
@@ -1760,7 +1857,7 @@ ice_switch_init(struct ice_adapter *ad)
 {
 	int ret = 0;
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
+	struct ice_flow_parser *perm_parser;
 
 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
 		dist_parser = &ice_switch_dist_parser_comms;
@@ -1769,10 +1866,16 @@ ice_switch_init(struct ice_adapter *ad)
 	else
 		return -EINVAL;
 
-	if (ad->devargs.pipe_mode_support)
+	if (ad->devargs.pipe_mode_support) {
+		if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
+			perm_parser = &ice_switch_perm_parser_comms;
+		else
+			perm_parser = &ice_switch_perm_parser_os;
+
 		ret = ice_register_parser(perm_parser, ad);
-	else
+	} else {
 		ret = ice_register_parser(dist_parser, ad);
+	}
 
 	return ret;
 }
@@ -1780,17 +1883,25 @@ static void
 ice_switch_uninit(struct ice_adapter *ad)
 {
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
+	struct ice_flow_parser *perm_parser;
 
 	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
 		dist_parser = &ice_switch_dist_parser_comms;
-	else
+	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
 		dist_parser = &ice_switch_dist_parser_os;
+	else
+		return;
+
+	if (ad->devargs.pipe_mode_support) {
+		if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
+			perm_parser = &ice_switch_perm_parser_comms;
+		else
+			perm_parser = &ice_switch_perm_parser_os;
 
-	if (ad->devargs.pipe_mode_support)
 		ice_unregister_parser(perm_parser, ad);
-	else
+	} else {
 		ice_unregister_parser(dist_parser, ad);
+	}
 }
 
 static struct
@@ -1824,10 +1935,19 @@ ice_flow_parser ice_switch_dist_parser_comms = {
 };
 
 static struct
-ice_flow_parser ice_switch_perm_parser = {
+ice_flow_parser ice_switch_perm_parser_os = {
+	.engine = &ice_switch_engine,
+	.array = ice_switch_pattern_perm_os,
+	.array_len = RTE_DIM(ice_switch_pattern_perm_os),
+	.parse_pattern_action = ice_switch_parse_pattern_action,
+	.stage = ICE_FLOW_STAGE_PERMISSION,
+};
+
+static struct
+ice_flow_parser ice_switch_perm_parser_comms = {
 	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_perm,
-	.array_len = RTE_DIM(ice_switch_pattern_perm),
+	.array = ice_switch_pattern_perm_comms,
+	.array_len = RTE_DIM(ice_switch_pattern_perm_comms),
 	.parse_pattern_action = ice_switch_parse_pattern_action,
 	.stage = ICE_FLOW_STAGE_PERMISSION,
 };
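
Usage sketch (illustrative, not part of the diff): with the new
RTE_FLOW_ACTION_TYPE_DROP case in ice_switch_parse_dcf_action(), a DCF
switch rule can drop traffic through the generic rte_flow API. The port id
and IPv4 source address below are assumptions for the example, not values
taken from the commit.

	#include <stdint.h>
	#include <rte_byteorder.h>
	#include <rte_flow.h>
	#include <rte_ip.h>

	/* Drop ingress IPv4 packets from one source address on @port_id;
	 * on an ice DCF port this reaches the ICE_DROP_PACKET branch
	 * added above.
	 */
	static struct rte_flow *
	create_drop_rule(uint16_t port_id)
	{
		struct rte_flow_attr attr = { .ingress = 1 };
		struct rte_flow_item_ipv4 ip_spec = {
			.hdr.src_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
		};
		struct rte_flow_item_ipv4 ip_mask = {
			.hdr.src_addr = RTE_BE32(UINT32_MAX),
		};
		struct rte_flow_item pattern[] = {
			{ .type = RTE_FLOW_ITEM_TYPE_ETH },
			{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
			  .spec = &ip_spec, .mask = &ip_mask },
			{ .type = RTE_FLOW_ITEM_TYPE_END },
		};
		struct rte_flow_action actions[] = {
			{ .type = RTE_FLOW_ACTION_TYPE_DROP },
			{ .type = RTE_FLOW_ACTION_TYPE_END },
		};
		struct rte_flow_error err;

		return rte_flow_create(port_id, &attr, pattern, actions, &err);
	}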
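Likewise illustrative: the new error1/error2 labels in
ice_switch_parse_action() split the old catch-all "Invalid action type or
queue number" into two distinct queue-group (RSS action) failures: queues
reaching past nb_rx_queues ("Invalid queue region indexes") and a
non-contiguous queue list ("Discontinuous queue region"). The queue numbers
below are assumptions for the example.

	/* A conforming queue group: {4, 5, 6, 7} is contiguous and, given
	 * at least 8 Rx queues, in range. A list such as {4, 6, 7, 9}
	 * would now be rejected with "Discontinuous queue region".
	 */
	static const uint16_t qgrp_queues[] = { 4, 5, 6, 7 };
	static const struct rte_flow_action_rss qgrp_conf = {
		.queue_num = RTE_DIM(qgrp_queues),
		.queue = qgrp_queues,
	};
	static const struct rte_flow_action qgrp_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &qgrp_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};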