X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_switch_filter.c;h=e5b7d56068f7f6858458f8c0da0abfa3490ead92;hb=68b6240ee9f0484ae7c75705c960922aa500544e;hp=dae0d470b4e170f5f9be9e86a6c980119bd2ea46;hpb=ea1e91e962e3d3915a041a20df5d849e5ce140f3;p=dpdk.git

diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c
index dae0d470b4..e5b7d56068 100644
--- a/drivers/net/ice/ice_switch_filter.c
+++ b/drivers/net/ice/ice_switch_filter.c
@@ -26,7 +26,8 @@
 #include "ice_dcf_ethdev.h"
 
 
-#define MAX_QGRP_NUM_TYPE 7
+#define MAX_QGRP_NUM_TYPE	7
+#define MAX_INPUT_SET_BYTE	32
 #define ICE_PPP_IPV4_PROTO	0x0021
 #define ICE_PPP_IPV6_PROTO	0x0057
 #define ICE_IPV4_PROTO_NVGRE	0x002F
@@ -136,16 +137,17 @@ struct sw_meta {
 	struct ice_adv_rule_info rule_info;
 };
 
-static struct ice_flow_parser ice_switch_dist_parser_os;
-static struct ice_flow_parser ice_switch_dist_parser_comms;
+static struct ice_flow_parser ice_switch_dist_parser;
 static struct ice_flow_parser ice_switch_perm_parser;
 
 static struct
-ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
+ice_pattern_match_item ice_switch_pattern_dist_list[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+	{pattern_eth_arp,
+			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
@@ -227,45 +229,13 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = {
 };
 
 static struct
-ice_pattern_match_item ice_switch_pattern_dist_os[] = {
+ice_pattern_match_item ice_switch_pattern_perm_list[] = {
 	{pattern_ethertype,
 			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
 	{pattern_ethertype_vlan,
 			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
 	{pattern_eth_arp,
-			ICE_INSET_NONE, ICE_INSET_NONE},
-	{pattern_eth_ipv4,
-			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
-	{pattern_eth_ipv4_udp,
-			ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE},
-	{pattern_eth_ipv4_tcp,
-			ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE},
-	{pattern_eth_ipv6,
-			ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE},
-	{pattern_eth_ipv6_udp,
-			ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE},
-	{pattern_eth_ipv6_tcp,
-			ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE},
-	{pattern_eth_ipv4_udp_vxlan_eth_ipv4,
-			ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE},
-	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp,
-			ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE},
-	{pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp,
-			ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE},
-	{pattern_eth_ipv4_nvgre_eth_ipv4,
-			ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE},
-	{pattern_eth_ipv4_nvgre_eth_ipv4_udp,
-			ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE},
-	{pattern_eth_ipv4_nvgre_eth_ipv4_tcp,
-			ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE},
-};
-
-static struct
-ice_pattern_match_item ice_switch_pattern_perm[] = {
-	{pattern_ethertype,
-			ICE_SW_INSET_ETHER, ICE_INSET_NONE},
-	{pattern_ethertype_vlan,
-			ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE},
+			ICE_INSET_NONE, ICE_INSET_NONE},
 	{pattern_eth_ipv4,
 			ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE},
 	{pattern_eth_ipv4_udp,
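A note for readers on how the tables above are consumed: ice_generic_flow does a
linear search over these {pattern, input-set} arrays, and the merged dist list
simply folds the former OS-default and comms tables into one. A minimal sketch of
that lookup, with hypothetical stand-in types (not the driver's real
ice_pattern_match_item, which the real matcher compares by walking the flow item
types; pointer identity here is a simplification):

    /* Sketch: first-match scan over a pattern/input-set table. */
    #include <stddef.h>
    #include <stdint.h>

    struct pat_item {
        const void *pattern;      /* stand-in for the shared item array */
        uint64_t input_set_mask;  /* fields this pattern may match on   */
    };

    static const struct pat_item *
    find_pattern(const struct pat_item *tbl, size_t n, const void *pat)
    {
        for (size_t i = 0; i < n; i++)
            if (tbl[i].pattern == pat)  /* simplified match test */
                return &tbl[i];
        return NULL;                    /* pattern unsupported */
    }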
@@ -472,15 +442,18 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask;
 	const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask;
 	uint64_t input_set = ICE_INSET_NONE;
+	uint16_t input_set_byte = 0;
 	bool pppoe_elem_valid = 0;
 	bool pppoe_patt_valid = 0;
 	bool pppoe_prot_valid = 0;
-	bool profile_rule = 0;
 	bool tunnel_valid = 0;
-	bool ipv6_valiad = 0;
-	bool ipv4_valiad = 0;
-	bool udp_valiad = 0;
-	bool tcp_valiad = 0;
+	bool profile_rule = 0;
+	bool nvgre_valid = 0;
+	bool vxlan_valid = 0;
+	bool ipv6_valid = 0;
+	bool ipv4_valid = 0;
+	bool udp_valid = 0;
+	bool tcp_valid = 0;
 	uint16_t j, t = 0;
 
 	for (item = pattern; item->type !=
@@ -539,6 +512,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					m->src_addr[j] =
 						eth_mask->src.addr_bytes[j];
 					i = 1;
+					input_set_byte++;
 				}
 				if (eth_mask->dst.addr_bytes[j]) {
 					h->dst_addr[j] =
@@ -546,6 +520,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					m->dst_addr[j] =
 						eth_mask->dst.addr_bytes[j];
 					i = 1;
+					input_set_byte++;
 				}
 			}
 			if (i)
@@ -556,6 +531,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					eth_spec->type;
 				list[t].m_u.ethertype.ethtype_id =
 					eth_mask->type;
+				input_set_byte += 2;
 				t++;
 			}
 		}
@@ -564,7 +540,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	case RTE_FLOW_ITEM_TYPE_IPV4:
 		ipv4_spec = item->spec;
 		ipv4_mask = item->mask;
-		ipv4_valiad = 1;
+		ipv4_valid = 1;
 		if (ipv4_spec && ipv4_mask) {
 			/* Check IPv4 mask and update input set */
 			if (ipv4_mask->hdr.version_ihl ||
@@ -615,24 +591,28 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.src_addr;
 				list[t].m_u.ipv4_hdr.src_addr =
 					ipv4_mask->hdr.src_addr;
+				input_set_byte += 2;
 			}
 			if (ipv4_mask->hdr.dst_addr) {
 				list[t].h_u.ipv4_hdr.dst_addr =
 					ipv4_spec->hdr.dst_addr;
 				list[t].m_u.ipv4_hdr.dst_addr =
 					ipv4_mask->hdr.dst_addr;
+				input_set_byte += 2;
 			}
 			if (ipv4_mask->hdr.time_to_live) {
 				list[t].h_u.ipv4_hdr.time_to_live =
 					ipv4_spec->hdr.time_to_live;
 				list[t].m_u.ipv4_hdr.time_to_live =
 					ipv4_mask->hdr.time_to_live;
+				input_set_byte++;
 			}
 			if (ipv4_mask->hdr.next_proto_id) {
 				list[t].h_u.ipv4_hdr.protocol =
 					ipv4_spec->hdr.next_proto_id;
 				list[t].m_u.ipv4_hdr.protocol =
 					ipv4_mask->hdr.next_proto_id;
+				input_set_byte++;
 			}
 			if ((ipv4_spec->hdr.next_proto_id &
 				ipv4_mask->hdr.next_proto_id) ==
@@ -643,6 +623,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv4_spec->hdr.type_of_service;
 				list[t].m_u.ipv4_hdr.tos =
 					ipv4_mask->hdr.type_of_service;
+				input_set_byte++;
 			}
 			t++;
 		}
@@ -651,7 +632,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	case RTE_FLOW_ITEM_TYPE_IPV6:
 		ipv6_spec = item->spec;
 		ipv6_mask = item->mask;
-		ipv6_valiad = 1;
+		ipv6_valid = 1;
 		if (ipv6_spec && ipv6_mask) {
 			if (ipv6_mask->hdr.payload_len) {
 				rte_flow_error_set(error, EINVAL,
@@ -720,12 +701,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						ipv6_spec->hdr.src_addr[j];
 					s->src_addr[j] =
 						ipv6_mask->hdr.src_addr[j];
+					input_set_byte++;
 				}
 				if (ipv6_mask->hdr.dst_addr[j]) {
 					f->dst_addr[j] =
 						ipv6_spec->hdr.dst_addr[j];
 					s->dst_addr[j] =
 						ipv6_mask->hdr.dst_addr[j];
+					input_set_byte++;
 				}
 			}
 			if (ipv6_mask->hdr.proto) {
@@ -733,12 +716,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					ipv6_spec->hdr.proto;
 				s->next_hdr =
 					ipv6_mask->hdr.proto;
+				input_set_byte++;
 			}
 			if (ipv6_mask->hdr.hop_limits) {
 				f->hop_limit =
 					ipv6_spec->hdr.hop_limits;
 				s->hop_limit =
 					ipv6_mask->hdr.hop_limits;
+				input_set_byte++;
 			}
 			if (ipv6_mask->hdr.vtc_flow &
 					rte_cpu_to_be_32
@@ -756,6 +741,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 						RTE_IPV6_HDR_TC_MASK) >>
 						RTE_IPV6_HDR_TC_SHIFT;
 				s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val);
+				input_set_byte += 4;
 			}
 			t++;
 		}
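The hunks above all repeat one idiom: copy a header field into the lookup element
only when its mask is non-zero, and charge the matched bytes against a running
count. A compact sketch of that idiom with illustrative names; note the patch
itself charges fixed per-field costs (for example += 2 per IPv4 address) rather
than counting byte by byte as this sketch does:

    /* Sketch: mask-driven field copy that also accounts for the
     * input-set bytes it consumes. */
    #include <stdint.h>

    static uint16_t
    copy_masked(uint8_t *hdr, uint8_t *msk,
                const uint8_t *spec, const uint8_t *mask, uint16_t len)
    {
        uint16_t counted = 0;

        for (uint16_t i = 0; i < len; i++) {
            if (mask[i]) {
                hdr[i] = spec[i];   /* value the hardware matches on */
                msk[i] = mask[i];   /* which bits of it are compared */
                counted++;          /* one input-set byte consumed   */
            }
        }
        return counted;
    }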
@@ -764,7 +750,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	case RTE_FLOW_ITEM_TYPE_UDP:
 		udp_spec = item->spec;
 		udp_mask = item->mask;
-		udp_valiad = 1;
+		udp_valid = 1;
 		if (udp_spec && udp_mask) {
 			/* Check UDP mask and update input set*/
 			if (udp_mask->hdr.dgram_len ||
@@ -801,21 +787,23 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					udp_spec->hdr.src_port;
 				list[t].m_u.l4_hdr.src_port =
 					udp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (udp_mask->hdr.dst_port) {
 				list[t].h_u.l4_hdr.dst_port =
 					udp_spec->hdr.dst_port;
 				list[t].m_u.l4_hdr.dst_port =
 					udp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
-				t++;
+			t++;
 		}
 		break;
 	case RTE_FLOW_ITEM_TYPE_TCP:
 		tcp_spec = item->spec;
 		tcp_mask = item->mask;
-		tcp_valiad = 1;
+		tcp_valid = 1;
 		if (tcp_spec && tcp_mask) {
 			/* Check TCP mask and update input set */
 			if (tcp_mask->hdr.sent_seq ||
@@ -853,12 +841,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					tcp_spec->hdr.src_port;
 				list[t].m_u.l4_hdr.src_port =
 					tcp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (tcp_mask->hdr.dst_port) {
 				list[t].h_u.l4_hdr.dst_port =
 					tcp_spec->hdr.dst_port;
 				list[t].m_u.l4_hdr.dst_port =
 					tcp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -898,12 +888,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					sctp_spec->hdr.src_port;
 				list[t].m_u.sctp_hdr.src_port =
 					sctp_mask->hdr.src_port;
+				input_set_byte += 2;
 			}
 			if (sctp_mask->hdr.dst_port) {
 				list[t].h_u.sctp_hdr.dst_port =
 					sctp_spec->hdr.dst_port;
 				list[t].m_u.sctp_hdr.dst_port =
 					sctp_mask->hdr.dst_port;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -924,7 +916,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				   "Invalid VXLAN item");
 			return 0;
 		}
-
+		vxlan_valid = 1;
 		tunnel_valid = 1;
 		if (vxlan_spec && vxlan_mask) {
 			list[t].type = ICE_VXLAN;
@@ -941,6 +933,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					vxlan_mask->vni[0];
 				input_set |=
 					ICE_INSET_TUN_VXLAN_VNI;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -961,6 +954,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				   "Invalid NVGRE item");
 			return 0;
 		}
+		nvgre_valid = 1;
 		tunnel_valid = 1;
 		if (nvgre_spec && nvgre_mask) {
 			list[t].type = ICE_NVGRE;
@@ -977,6 +971,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 					nvgre_mask->tni[0];
 				input_set |=
 					ICE_INSET_TUN_NVGRE_TNI;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -1005,6 +1000,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.vlan_hdr.vlan =
 					vlan_mask->tci;
 				input_set |= ICE_INSET_VLAN_OUTER;
+				input_set_byte += 2;
 			}
 			if (vlan_mask->inner_type) {
 				list[t].h_u.vlan_hdr.type =
@@ -1012,6 +1008,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.vlan_hdr.type =
 					vlan_mask->inner_type;
 				input_set |= ICE_INSET_ETHERTYPE;
+				input_set_byte += 2;
 			}
 			t++;
 		}
@@ -1052,6 +1049,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.pppoe_hdr.session_id =
 					pppoe_mask->session_id;
 				input_set |= ICE_INSET_PPPOE_SESSION;
+				input_set_byte += 2;
 			}
 			t++;
 			pppoe_elem_valid = 1;
@@ -1084,7 +1082,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.pppoe_hdr.ppp_prot_id =
 					pppoe_proto_mask->proto_id;
 				input_set |= ICE_INSET_PPPOE_PROTO;
-
+				input_set_byte += 2;
 				pppoe_prot_valid = 1;
 			}
 			if ((pppoe_proto_mask->proto_id &
@@ -1123,16 +1121,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			if (!esp_spec && !esp_mask && !input_set) {
 				profile_rule = 1;
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_ESP;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (esp_spec && esp_mask &&
 						esp_mask->hdr.spi){
-				if (udp_valiad)
+				if (udp_valid)
 					list[t].type = ICE_NAT_T;
 				else
 					list[t].type = ICE_ESP;
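With vxlan_valid and nvgre_valid recorded here, the parse loop no longer decides
the tunnel type on the spot; it only collects evidence, and the type is derived
once after the loop (see the ICE_NON_TUN block added further down). A sketch of
that two-phase shape, with a hypothetical simplified enum rather than the
driver's real ice_sw_tunnel_type values:

    /* Sketch: flags gathered during parsing, resolved afterwards. */
    #include <stdbool.h>

    enum tun_kind {
        TUN_NONE, TUN_VXLAN, TUN_NVGRE,
        NON_TUN_IPV4_TCP, NON_TUN_IPV4_UDP
    };

    static enum tun_kind
    resolve_tun_kind(bool vxlan, bool nvgre, bool ipv4, bool tcp, bool udp)
    {
        if (vxlan)
            return TUN_VXLAN;       /* tunnel items win outright */
        if (nvgre)
            return TUN_NVGRE;
        if (ipv4 && tcp)
            return NON_TUN_IPV4_TCP;
        if (ipv4 && udp)
            return NON_TUN_IPV4_UDP;
        return TUN_NONE;            /* plain, non-tunnel rule */
    }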
@@ -1141,17 +1139,18 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.esp_hdr.spi =
 					esp_mask->hdr.spi;
 				input_set |= ICE_INSET_ESP_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type = ICE_SW_TUN_IPV6_NAT_T;
-				else if (ipv4_valiad && udp_valiad)
+				else if (ipv4_valid && udp_valid)
 					*tun_type = ICE_SW_TUN_IPV4_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type = ICE_SW_TUN_IPV6_ESP;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type = ICE_SW_TUN_IPV4_ESP;
 			}
 			break;
@@ -1182,12 +1181,12 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			if (!ah_spec && !ah_mask && !input_set) {
 				profile_rule = 1;
-				if (ipv6_valiad && udp_valiad)
+				if (ipv6_valid && udp_valid)
 					*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_NAT_T;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type =
 					ICE_SW_TUN_PROFID_IPV6_AH;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (ah_spec && ah_mask &&
 						ah_mask->spi){
@@ -1197,15 +1196,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.ah_hdr.spi =
 					ah_mask->spi;
 				input_set |= ICE_INSET_AH_SPI;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (udp_valiad)
+				if (udp_valid)
 					return 0;
-				else if (ipv6_valiad)
+				else if (ipv6_valid)
 					*tun_type = ICE_SW_TUN_IPV6_AH;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type = ICE_SW_TUN_IPV4_AH;
 			}
 			break;
@@ -1223,10 +1223,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 
 			if (!l2tp_spec && !l2tp_mask && !input_set) {
-				if (ipv6_valiad)
+				if (ipv6_valid)
 					*tun_type =
 					ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					return 0;
 			} else if (l2tp_spec && l2tp_mask &&
 						l2tp_mask->session_id){
@@ -1236,14 +1236,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				list[t].m_u.l2tpv3_sess_hdr.session_id =
 					l2tp_mask->session_id;
 				input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID;
+				input_set_byte += 4;
 				t++;
 			}
 
 			if (!profile_rule) {
-				if (ipv6_valiad)
+				if (ipv6_valid)
 					*tun_type =
 					ICE_SW_TUN_IPV6_L2TPV3;
-				else if (ipv4_valiad)
+				else if (ipv4_valid)
 					*tun_type =
 					ICE_SW_TUN_IPV4_L2TPV3;
 			}
@@ -1277,7 +1278,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 			}
 			if (pfcp_mask->s_field &&
 				pfcp_spec->s_field == 0x01 &&
-				ipv6_valiad)
+				ipv6_valid)
 				*tun_type =
 				ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION;
 			else if (pfcp_mask->s_field &&
@@ -1286,7 +1287,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 				ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION;
 			else if (pfcp_mask->s_field &&
 				!pfcp_spec->s_field &&
-				ipv6_valiad)
+				ipv6_valid)
 				*tun_type =
 				ICE_SW_TUN_PROFID_IPV6_PFCP_NODE;
 			else if (pfcp_mask->s_field &&
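The next hunk adds the consumer of input_set_byte: a guard that rejects rules
whose accumulated match bytes exceed the 32-byte budget the switch filter can
program. Stripped of the rte_flow error plumbing (which the driver supplies via
rte_flow_error_set()), the check reduces to:

    /* Sketch: input-set budget check, same limit the patch introduces. */
    #include <stdint.h>
    #include <errno.h>

    #define MAX_INPUT_SET_BYTE 32

    static int
    check_input_set(uint16_t input_set_byte)
    {
        if (input_set_byte > MAX_INPUT_SET_BYTE)
            return -ENOTSUP;    /* reported as "too much input set" */
        return 0;
    }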
@@ -1310,22 +1311,45 @@ ice_switch_inset_get(const struct rte_flow_item pattern[],
 	}
 
 	if (pppoe_patt_valid && !pppoe_prot_valid) {
-		if (ipv6_valiad && udp_valiad)
+		if (ipv6_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP;
-		else if (ipv6_valiad && tcp_valiad)
+		else if (ipv6_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
-		else if (ipv4_valiad && udp_valiad)
+		else if (ipv4_valid && udp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
-		else if (ipv4_valiad && tcp_valiad)
+		else if (ipv4_valid && tcp_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
-		else if (ipv6_valiad)
+		else if (ipv6_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
-		else if (ipv4_valiad)
+		else if (ipv4_valid)
 			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
 		else
 			*tun_type = ICE_SW_TUN_PPPOE;
 	}
 
+	if (*tun_type == ICE_NON_TUN) {
+		if (vxlan_valid)
+			*tun_type = ICE_SW_TUN_VXLAN;
+		else if (nvgre_valid)
+			*tun_type = ICE_SW_TUN_NVGRE;
+		else if (ipv4_valid && tcp_valid)
+			*tun_type = ICE_SW_IPV4_TCP;
+		else if (ipv4_valid && udp_valid)
+			*tun_type = ICE_SW_IPV4_UDP;
+		else if (ipv6_valid && tcp_valid)
+			*tun_type = ICE_SW_IPV6_TCP;
+		else if (ipv6_valid && udp_valid)
+			*tun_type = ICE_SW_IPV6_UDP;
+	}
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return -ENOTSUP;
+	}
+
 	*lkups_num = t;
 
 	return input_set;
@@ -1350,17 +1374,32 @@ ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
 		case RTE_FLOW_ACTION_TYPE_VF:
 			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
 			act_vf = action->conf;
+
+			if (act_vf->id >= ad->real_hw.num_vfs &&
+				!act_vf->original) {
+				rte_flow_error_set(error,
+					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					actions,
+					"Invalid vf id");
+				return -rte_errno;
+			}
+
 			if (act_vf->original)
 				rule_info->sw_act.vsi_handle =
 					ad->real_hw.avf.bus.func;
 			else
 				rule_info->sw_act.vsi_handle = act_vf->id;
 			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
+			break;
+
 		default:
 			rte_flow_error_set(error,
 				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				actions,
-				"Invalid action type or queue number");
+				"Invalid action type");
 			return -rte_errno;
 		}
 	}
@@ -1412,11 +1451,11 @@ ice_switch_parse_action(struct ice_pf *pf,
 			if ((act_qgrop->queue[0] +
 				act_qgrop->queue_num) >
 				dev->data->nb_rx_queues)
-				goto error;
+				goto error1;
 			for (i = 0; i < act_qgrop->queue_num - 1; i++)
 				if (act_qgrop->queue[i + 1] !=
 					act_qgrop->queue[i] + 1)
-					goto error;
+					goto error2;
 			rule_info->sw_act.qgrp_size =
 				act_qgrop->queue_num;
 			break;
@@ -1456,6 +1495,20 @@ error:
 		actions,
 		"Invalid action type or queue number");
 	return -rte_errno;
+
+error1:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Invalid queue region indexes");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Discontinuous queue region");
+	return -rte_errno;
 }
 
 static int
@@ -1542,10 +1595,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	for (; item->type !=
 			RTE_FLOW_ITEM_TYPE_END; item++) {
 		item_num++;
-		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
-			tun_type = ICE_SW_TUN_VXLAN;
-		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
-			tun_type = ICE_SW_TUN_NVGRE;
 		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			const struct rte_flow_item_eth *eth_mask;
 			if (item->mask)
@@ -1580,7 +1629,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	}
 
 	pattern_match_item =
-		ice_search_pattern_match_item(pattern, array, array_len, error);
+		ice_search_pattern_match_item(ad, pattern, array, array_len,
+			error);
 	if (!pattern_match_item) {
 		rte_flow_error_set(error, EINVAL,
 			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1603,12 +1653,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	rule_info.tun_type = tun_type;
 
 	ret = ice_switch_check_action(actions, error);
-	if (ret) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-			"Invalid input action number");
+	if (ret)
 		goto error;
-	}
 
 	if (ad->hw.dcf_enabled)
 		ret = ice_switch_parse_dcf_action((void *)ad, actions, error,
@@ -1616,12 +1662,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	else
 		ret = ice_switch_parse_action(pf, actions, error,
 				&rule_info);
-	if (ret) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-			"Invalid input action");
+	if (ret)
 		goto error;
-	}
 
 	if (meta) {
 		*meta = sw_meta_ptr;
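The error1/error2 split above exists so the two queue-group failures report
distinct messages: a region that overruns the Rx queue count versus queue ids
that are not consecutive. The checks themselves, in isolation (illustrative
signature and return codes, not the driver's):

    /* Sketch: validate a queue region is in range and contiguous. */
    #include <stdint.h>

    static int
    check_queue_region(const uint16_t *queue, uint16_t num, uint16_t nb_rxq)
    {
        if ((uint32_t)queue[0] + num > nb_rxq)
            return -1;              /* invalid queue region indexes */
        for (uint16_t i = 0; i + 1 < num; i++)
            if (queue[i + 1] != queue[i] + 1)
                return -2;          /* discontinuous queue region */
        return 0;
    }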
@@ -1746,19 +1788,15 @@ ice_switch_init(struct ice_adapter *ad)
 {
 	int ret = 0;
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
+	struct ice_flow_parser *perm_parser;
 
-	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
-		dist_parser = &ice_switch_dist_parser_comms;
-	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
-		dist_parser = &ice_switch_dist_parser_os;
-	else
-		return -EINVAL;
-
-	if (ad->devargs.pipe_mode_support)
+	if (ad->devargs.pipe_mode_support) {
+		perm_parser = &ice_switch_perm_parser;
 		ret = ice_register_parser(perm_parser, ad);
-	else
+	} else {
+		dist_parser = &ice_switch_dist_parser;
 		ret = ice_register_parser(dist_parser, ad);
+	}
 
 	return ret;
 }
@@ -1766,17 +1804,15 @@ static void
 ice_switch_uninit(struct ice_adapter *ad)
 {
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
 
-	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
-		dist_parser = &ice_switch_dist_parser_comms;
-	else
-		dist_parser = &ice_switch_dist_parser_os;
-
-	if (ad->devargs.pipe_mode_support)
+	struct ice_flow_parser *perm_parser;
+
+	if (ad->devargs.pipe_mode_support) {
+		perm_parser = &ice_switch_perm_parser;
 		ice_unregister_parser(perm_parser, ad);
-	else
+	} else {
+		dist_parser = &ice_switch_dist_parser;
 		ice_unregister_parser(dist_parser, ad);
+	}
 }
 
 static struct
@@ -1792,19 +1828,10 @@ ice_flow_engine ice_switch_engine = {
 };
 
 static struct
-ice_flow_parser ice_switch_dist_parser_os = {
-	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_dist_os,
-	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
-	.parse_pattern_action = ice_switch_parse_pattern_action,
-	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
-};
-
-static struct
-ice_flow_parser ice_switch_dist_parser_comms = {
+ice_flow_parser ice_switch_dist_parser = {
 	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_dist_comms,
-	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
+	.array = ice_switch_pattern_dist_list,
+	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
 	.parse_pattern_action = ice_switch_parse_pattern_action,
 	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
 };
@@ -1812,8 +1839,8 @@ ice_flow_parser ice_switch_dist_parser_comms = {
 static struct
 ice_flow_parser ice_switch_perm_parser = {
 	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_perm,
-	.array_len = RTE_DIM(ice_switch_pattern_perm),
+	.array = ice_switch_pattern_perm_list,
+	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
 	.parse_pattern_action = ice_switch_parse_pattern_action,
 	.stage = ICE_FLOW_STAGE_PERMISSION,
 };
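Finally, with the OS-default/comms package split removed, parser selection
collapses to a single devarg: pipe_mode_support picks the permission-stage
parser, everything else gets the distributor-stage one. The shape of that
choice, with hypothetical stand-in types rather than the real ice_flow_parser:

    /* Sketch: one-flag parser selection after the unification above. */
    #include <stdbool.h>

    struct parser { const char *stage; };

    static struct parser perm_parser = { "permission" };
    static struct parser dist_parser = { "distributor" };

    static struct parser *
    select_parser(bool pipe_mode_support)
    {
        /* pipe mode registers the permission-stage parser;
         * otherwise the (now single) distributor parser is used. */
        return pipe_mode_support ? &perm_parser : &dist_parser;
    }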