X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_switch_filter.c;h=4dfeeef50fe5bdae83aab282b6d3e4ba95f8aff4;hb=8d6004858c9058fb76514777ba88d392a4a61899;hp=dd3f4847aa434c1469a4048fe771877fad541cc6;hpb=0beafe7b042f5ea3814471ef6c98aa895d345d4d;p=dpdk.git diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index dd3f4847aa..4dfeeef50f 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -23,9 +23,14 @@ #include "ice_logs.h" #include "ice_ethdev.h" #include "ice_generic_flow.h" +#include "ice_dcf_ethdev.h" -#define MAX_QGRP_NUM_TYPE 7 +#define MAX_QGRP_NUM_TYPE 7 +#define MAX_INPUT_SET_BYTE 32 +#define ICE_PPP_IPV4_PROTO 0x0021 +#define ICE_PPP_IPV6_PROTO 0x0057 +#define ICE_IPV4_PROTO_NVGRE 0x002F #define ICE_SW_INSET_ETHER ( \ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) @@ -95,6 +100,18 @@ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \ ICE_INSET_PPPOE_PROTO) +#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4) +#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP) +#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP) +#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6) +#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP) +#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP) #define ICE_SW_INSET_MAC_IPV4_ESP ( \ ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI) #define ICE_SW_INSET_MAC_IPV6_ESP ( \ @@ -122,7 +139,42 @@ struct sw_meta { static struct ice_flow_parser ice_switch_dist_parser_os; static struct ice_flow_parser ice_switch_dist_parser_comms; -static struct ice_flow_parser ice_switch_perm_parser; +static struct ice_flow_parser ice_switch_perm_parser_os; +static struct ice_flow_parser ice_switch_perm_parser_comms; + +static struct +ice_pattern_match_item ice_switch_pattern_dist_os[] = { + {pattern_ethertype, + ICE_SW_INSET_ETHER, ICE_INSET_NONE}, + {pattern_ethertype_vlan, + ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, + {pattern_eth_arp, + ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, + ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, + ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, + ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6, + ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, + ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, + ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, + ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, + ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, + ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, + ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, + ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, + ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, +}; static struct ice_pattern_match_item ice_switch_pattern_dist_comms[] = { @@ -130,6 +182,8 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { ICE_SW_INSET_ETHER, ICE_INSET_NONE}, {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, 
ICE_INSET_NONE}, + {pattern_eth_arp, + ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_udp, @@ -154,10 +208,6 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, {pattern_eth_vlan_pppoes, @@ -166,6 +216,30 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_udp, + ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_udp, + ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_udp, + ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_udp, + ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_esp, @@ -191,7 +265,7 @@ ice_pattern_match_item ice_switch_pattern_dist_comms[] = { }; static struct -ice_pattern_match_item ice_switch_pattern_dist_os[] = { +ice_pattern_match_item ice_switch_pattern_perm_os[] = { {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE}, {pattern_ethertype_vlan, @@ -211,25 +285,27 @@ ice_pattern_match_item ice_switch_pattern_dist_os[] = { {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4, - ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, - ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, - ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4, - ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_udp, - ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, - ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, + ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, }; static struct -ice_pattern_match_item ice_switch_pattern_perm[] = { +ice_pattern_match_item ice_switch_pattern_perm_comms[] = { {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE}, {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, + {pattern_eth_arp, + ICE_INSET_NONE, ICE_INSET_NONE}, {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, 
ICE_INSET_NONE}, {pattern_eth_ipv4_udp, @@ -254,10 +330,6 @@ ice_pattern_match_item ice_switch_pattern_perm[] = { ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, {pattern_eth_vlan_pppoes, @@ -266,6 +338,30 @@ ice_pattern_match_item ice_switch_pattern_perm[] = { ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_udp, + ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_udp, + ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4, + ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_udp, + ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6, + ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_tcp, + ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_udp, + ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE}, {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE}, {pattern_eth_ipv4_udp_esp, @@ -416,13 +512,19 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask; const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask; uint64_t input_set = ICE_INSET_NONE; - uint16_t j, t = 0; - bool profile_rule = 0; + uint16_t input_set_byte = 0; + bool pppoe_elem_valid = 0; + bool pppoe_patt_valid = 0; + bool pppoe_prot_valid = 0; bool tunnel_valid = 0; - bool pppoe_valid = 0; - bool ipv6_valiad = 0; - bool ipv4_valiad = 0; - bool udp_valiad = 0; + bool profile_rule = 0; + bool nvgre_valid = 0; + bool vxlan_valid = 0; + bool ipv6_valid = 0; + bool ipv4_valid = 0; + bool udp_valid = 0; + bool tcp_valid = 0; + uint16_t j, t = 0; for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { @@ -480,6 +582,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], m->src_addr[j] = eth_mask->src.addr_bytes[j]; i = 1; + input_set_byte++; } if (eth_mask->dst.addr_bytes[j]) { h->dst_addr[j] = @@ -487,6 +590,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], m->dst_addr[j] = eth_mask->dst.addr_bytes[j]; i = 1; + input_set_byte++; } } if (i) @@ -497,6 +601,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], eth_spec->type; list[t].m_u.ethertype.ethtype_id = eth_mask->type; + input_set_byte += 2; t++; } } @@ -505,7 +610,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV4: ipv4_spec = item->spec; ipv4_mask = item->mask; - ipv4_valiad = 1; + ipv4_valid = 1; if (ipv4_spec && ipv4_mask) { /* Check IPv4 mask and update input set */ if (ipv4_mask->hdr.version_ihl || @@ -556,30 +661,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv4_spec->hdr.src_addr; list[t].m_u.ipv4_hdr.src_addr = 
ipv4_mask->hdr.src_addr; + input_set_byte += 2; } if (ipv4_mask->hdr.dst_addr) { list[t].h_u.ipv4_hdr.dst_addr = ipv4_spec->hdr.dst_addr; list[t].m_u.ipv4_hdr.dst_addr = ipv4_mask->hdr.dst_addr; + input_set_byte += 2; } if (ipv4_mask->hdr.time_to_live) { list[t].h_u.ipv4_hdr.time_to_live = ipv4_spec->hdr.time_to_live; list[t].m_u.ipv4_hdr.time_to_live = ipv4_mask->hdr.time_to_live; + input_set_byte++; } if (ipv4_mask->hdr.next_proto_id) { list[t].h_u.ipv4_hdr.protocol = ipv4_spec->hdr.next_proto_id; list[t].m_u.ipv4_hdr.protocol = ipv4_mask->hdr.next_proto_id; + input_set_byte++; } + if ((ipv4_spec->hdr.next_proto_id & + ipv4_mask->hdr.next_proto_id) == + ICE_IPV4_PROTO_NVGRE) + *tun_type = ICE_SW_TUN_AND_NON_TUN; if (ipv4_mask->hdr.type_of_service) { list[t].h_u.ipv4_hdr.tos = ipv4_spec->hdr.type_of_service; list[t].m_u.ipv4_hdr.tos = ipv4_mask->hdr.type_of_service; + input_set_byte++; } t++; } @@ -588,7 +702,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV6: ipv6_spec = item->spec; ipv6_mask = item->mask; - ipv6_valiad = 1; + ipv6_valid = 1; if (ipv6_spec && ipv6_mask) { if (ipv6_mask->hdr.payload_len) { rte_flow_error_set(error, EINVAL, @@ -657,12 +771,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv6_spec->hdr.src_addr[j]; s->src_addr[j] = ipv6_mask->hdr.src_addr[j]; + input_set_byte++; } if (ipv6_mask->hdr.dst_addr[j]) { f->dst_addr[j] = ipv6_spec->hdr.dst_addr[j]; s->dst_addr[j] = ipv6_mask->hdr.dst_addr[j]; + input_set_byte++; } } if (ipv6_mask->hdr.proto) { @@ -670,12 +786,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv6_spec->hdr.proto; s->next_hdr = ipv6_mask->hdr.proto; + input_set_byte++; } if (ipv6_mask->hdr.hop_limits) { f->hop_limit = ipv6_spec->hdr.hop_limits; s->hop_limit = ipv6_mask->hdr.hop_limits; + input_set_byte++; } if (ipv6_mask->hdr.vtc_flow & rte_cpu_to_be_32 @@ -693,6 +811,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); + input_set_byte += 4; } t++; } @@ -701,7 +820,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_UDP: udp_spec = item->spec; udp_mask = item->mask; - udp_valiad = 1; + udp_valid = 1; if (udp_spec && udp_mask) { /* Check UDP mask and update input set*/ if (udp_mask->hdr.dgram_len || @@ -738,20 +857,23 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], udp_spec->hdr.src_port; list[t].m_u.l4_hdr.src_port = udp_mask->hdr.src_port; + input_set_byte += 2; } if (udp_mask->hdr.dst_port) { list[t].h_u.l4_hdr.dst_port = udp_spec->hdr.dst_port; list[t].m_u.l4_hdr.dst_port = udp_mask->hdr.dst_port; + input_set_byte += 2; } - t++; + t++; } break; case RTE_FLOW_ITEM_TYPE_TCP: tcp_spec = item->spec; tcp_mask = item->mask; + tcp_valid = 1; if (tcp_spec && tcp_mask) { /* Check TCP mask and update input set */ if (tcp_mask->hdr.sent_seq || @@ -789,12 +911,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], tcp_spec->hdr.src_port; list[t].m_u.l4_hdr.src_port = tcp_mask->hdr.src_port; + input_set_byte += 2; } if (tcp_mask->hdr.dst_port) { list[t].h_u.l4_hdr.dst_port = tcp_spec->hdr.dst_port; list[t].m_u.l4_hdr.dst_port = tcp_mask->hdr.dst_port; + input_set_byte += 2; } t++; } @@ -834,12 +958,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], sctp_spec->hdr.src_port; list[t].m_u.sctp_hdr.src_port = sctp_mask->hdr.src_port; + input_set_byte += 2; } if (sctp_mask->hdr.dst_port) { 
list[t].h_u.sctp_hdr.dst_port = sctp_spec->hdr.dst_port; list[t].m_u.sctp_hdr.dst_port = sctp_mask->hdr.dst_port; + input_set_byte += 2; } t++; } @@ -860,7 +986,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], "Invalid VXLAN item"); return 0; } - + vxlan_valid = 1; tunnel_valid = 1; if (vxlan_spec && vxlan_mask) { list[t].type = ICE_VXLAN; @@ -877,6 +1003,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], vxlan_mask->vni[0]; input_set |= ICE_INSET_TUN_VXLAN_VNI; + input_set_byte += 2; } t++; } @@ -897,6 +1024,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], "Invalid NVGRE item"); return 0; } + nvgre_valid = 1; tunnel_valid = 1; if (nvgre_spec && nvgre_mask) { list[t].type = ICE_NVGRE; @@ -913,6 +1041,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], nvgre_mask->tni[0]; input_set |= ICE_INSET_TUN_NVGRE_TNI; + input_set_byte += 2; } t++; } @@ -941,6 +1070,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.vlan_hdr.vlan = vlan_mask->tci; input_set |= ICE_INSET_VLAN_OUTER; + input_set_byte += 2; } if (vlan_mask->inner_type) { list[t].h_u.vlan_hdr.type = @@ -948,6 +1078,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.vlan_hdr.type = vlan_mask->inner_type; input_set |= ICE_INSET_ETHERTYPE; + input_set_byte += 2; } t++; } @@ -969,6 +1100,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], "Invalid pppoe item"); return 0; } + pppoe_patt_valid = 1; if (pppoe_spec && pppoe_mask) { /* Check pppoe mask and update input set */ if (pppoe_mask->length || @@ -987,9 +1119,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.pppoe_hdr.session_id = pppoe_mask->session_id; input_set |= ICE_INSET_PPPOE_SESSION; + input_set_byte += 2; } t++; - pppoe_valid = 1; + pppoe_elem_valid = 1; } break; @@ -1010,7 +1143,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], return 0; } if (pppoe_proto_spec && pppoe_proto_mask) { - if (pppoe_valid) + if (pppoe_elem_valid) t--; list[t].type = ICE_PPPOE; if (pppoe_proto_mask->proto_id) { @@ -1019,9 +1152,21 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.pppoe_hdr.ppp_prot_id = pppoe_proto_mask->proto_id; input_set |= ICE_INSET_PPPOE_PROTO; + input_set_byte += 2; + pppoe_prot_valid = 1; } + if ((pppoe_proto_mask->proto_id & + pppoe_proto_spec->proto_id) != + CPU_TO_BE16(ICE_PPP_IPV4_PROTO) && + (pppoe_proto_mask->proto_id & + pppoe_proto_spec->proto_id) != + CPU_TO_BE16(ICE_PPP_IPV6_PROTO)) + *tun_type = ICE_SW_TUN_PPPOE_PAY; + else + *tun_type = ICE_SW_TUN_PPPOE; t++; } + break; case RTE_FLOW_ITEM_TYPE_ESP: @@ -1046,16 +1191,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], if (!esp_spec && !esp_mask && !input_set) { profile_rule = 1; - if (ipv6_valiad && udp_valiad) + if (ipv6_valid && udp_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T; - else if (ipv6_valiad) + else if (ipv6_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP; - else if (ipv4_valiad) + else if (ipv4_valid) return 0; } else if (esp_spec && esp_mask && esp_mask->hdr.spi){ - if (udp_valiad) + if (udp_valid) list[t].type = ICE_NAT_T; else list[t].type = ICE_ESP; @@ -1064,17 +1209,18 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.esp_hdr.spi = esp_mask->hdr.spi; input_set |= ICE_INSET_ESP_SPI; + input_set_byte += 4; t++; } if (!profile_rule) { - if (ipv6_valiad && udp_valiad) + if (ipv6_valid && udp_valid) *tun_type = ICE_SW_TUN_IPV6_NAT_T; - else if (ipv4_valiad && udp_valiad) + else if 
(ipv4_valid && udp_valid) *tun_type = ICE_SW_TUN_IPV4_NAT_T; - else if (ipv6_valiad) + else if (ipv6_valid) *tun_type = ICE_SW_TUN_IPV6_ESP; - else if (ipv4_valiad) + else if (ipv4_valid) *tun_type = ICE_SW_TUN_IPV4_ESP; } break; @@ -1105,12 +1251,12 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], if (!ah_spec && !ah_mask && !input_set) { profile_rule = 1; - if (ipv6_valiad && udp_valiad) + if (ipv6_valid && udp_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_NAT_T; - else if (ipv6_valiad) + else if (ipv6_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_AH; - else if (ipv4_valiad) + else if (ipv4_valid) return 0; } else if (ah_spec && ah_mask && ah_mask->spi){ @@ -1120,15 +1266,16 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.ah_hdr.spi = ah_mask->spi; input_set |= ICE_INSET_AH_SPI; + input_set_byte += 4; t++; } if (!profile_rule) { - if (udp_valiad) + if (udp_valid) return 0; - else if (ipv6_valiad) + else if (ipv6_valid) *tun_type = ICE_SW_TUN_IPV6_AH; - else if (ipv4_valiad) + else if (ipv4_valid) *tun_type = ICE_SW_TUN_IPV4_AH; } break; @@ -1146,10 +1293,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], } if (!l2tp_spec && !l2tp_mask && !input_set) { - if (ipv6_valiad) + if (ipv6_valid) *tun_type = ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3; - else if (ipv4_valiad) + else if (ipv4_valid) return 0; } else if (l2tp_spec && l2tp_mask && l2tp_mask->session_id){ @@ -1159,14 +1306,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], list[t].m_u.l2tpv3_sess_hdr.session_id = l2tp_mask->session_id; input_set |= ICE_INSET_L2TPV3OIP_SESSION_ID; + input_set_byte += 4; t++; } if (!profile_rule) { - if (ipv6_valiad) + if (ipv6_valid) *tun_type = ICE_SW_TUN_IPV6_L2TPV3; - else if (ipv4_valiad) + else if (ipv4_valid) *tun_type = ICE_SW_TUN_IPV4_L2TPV3; } @@ -1200,7 +1348,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], } if (pfcp_mask->s_field && pfcp_spec->s_field == 0x01 && - ipv6_valiad) + ipv6_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION; else if (pfcp_mask->s_field && @@ -1209,7 +1357,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION; else if (pfcp_mask->s_field && !pfcp_spec->s_field && - ipv6_valiad) + ipv6_valid) *tun_type = ICE_SW_TUN_PROFID_IPV6_PFCP_NODE; else if (pfcp_mask->s_field && @@ -1232,6 +1380,46 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], } } + if (pppoe_patt_valid && !pppoe_prot_valid) { + if (ipv6_valid && udp_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV6_UDP; + else if (ipv6_valid && tcp_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP; + else if (ipv4_valid && udp_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP; + else if (ipv4_valid && tcp_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP; + else if (ipv6_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV6; + else if (ipv4_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV4; + else + *tun_type = ICE_SW_TUN_PPPOE; + } + + if (*tun_type == ICE_NON_TUN) { + if (vxlan_valid) + *tun_type = ICE_SW_TUN_VXLAN; + else if (nvgre_valid) + *tun_type = ICE_SW_TUN_NVGRE; + else if (ipv4_valid && tcp_valid) + *tun_type = ICE_SW_IPV4_TCP; + else if (ipv4_valid && udp_valid) + *tun_type = ICE_SW_IPV4_UDP; + else if (ipv6_valid && tcp_valid) + *tun_type = ICE_SW_IPV6_TCP; + else if (ipv6_valid && udp_valid) + *tun_type = ICE_SW_IPV6_UDP; + } + + if (input_set_byte > MAX_INPUT_SET_BYTE) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "too much input set"); + return -ENOTSUP; + } + *lkups_num = t; return 
input_set; @@ -1240,7 +1428,8 @@ out: } static int -ice_switch_parse_dcf_action(const struct rte_flow_action *actions, +ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad, + const struct rte_flow_action *actions, struct rte_flow_error *error, struct ice_adv_rule_info *rule_info) { @@ -1255,13 +1444,27 @@ ice_switch_parse_dcf_action(const struct rte_flow_action *actions, case RTE_FLOW_ACTION_TYPE_VF: rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI; act_vf = action->conf; - rule_info->sw_act.vsi_handle = act_vf->id; + + if (act_vf->id >= ad->real_hw.num_vfs && + !act_vf->original) { + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid vf id"); + return -rte_errno; + } + + if (act_vf->original) + rule_info->sw_act.vsi_handle = + ad->real_hw.avf.bus.func; + else + rule_info->sw_act.vsi_handle = act_vf->id; break; default: rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, - "Invalid action type or queue number"); + "Invalid action type"); return -rte_errno; } } @@ -1313,11 +1516,11 @@ ice_switch_parse_action(struct ice_pf *pf, if ((act_qgrop->queue[0] + act_qgrop->queue_num) > dev->data->nb_rx_queues) - goto error; + goto error1; for (i = 0; i < act_qgrop->queue_num - 1; i++) if (act_qgrop->queue[i + 1] != act_qgrop->queue[i] + 1) - goto error; + goto error2; rule_info->sw_act.qgrp_size = act_qgrop->queue_num; break; @@ -1357,6 +1560,20 @@ error: actions, "Invalid action type or queue number"); return -rte_errno; + +error1: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Invalid queue region indexes"); + return -rte_errno; + +error2: + rte_flow_error_set(error, + EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, + actions, + "Discontinuous queue region"); + return -rte_errno; } static int @@ -1388,7 +1605,7 @@ ice_switch_check_action(const struct rte_flow_action *actions, } } - if (actions_num > 1) { + if (actions_num != 1) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, actions, @@ -1438,18 +1655,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, const struct rte_flow_item *item = pattern; uint16_t item_num = 0; enum ice_sw_tunnel_type tun_type = - ICE_SW_TUN_AND_NON_TUN; + ICE_NON_TUN; struct ice_pattern_match_item *pattern_match_item = NULL; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { item_num++; - if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) - tun_type = ICE_SW_TUN_VXLAN; - if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) - tun_type = ICE_SW_TUN_NVGRE; - if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED || - item->type == RTE_FLOW_ITEM_TYPE_PPPOES) - tun_type = ICE_SW_TUN_PPPOE; if (item->type == RTE_FLOW_ITEM_TYPE_ETH) { const struct rte_flow_item_eth *eth_mask; if (item->mask) @@ -1507,24 +1717,17 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad, rule_info.tun_type = tun_type; ret = ice_switch_check_action(actions, error); - if (ret) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Invalid input action number"); + if (ret) goto error; - } if (ad->hw.dcf_enabled) - ret = ice_switch_parse_dcf_action(actions, error, &rule_info); + ret = ice_switch_parse_dcf_action((void *)ad, actions, error, + &rule_info); else ret = ice_switch_parse_action(pf, actions, error, &rule_info); - if (ret) { - rte_flow_error_set(error, EINVAL, - RTE_FLOW_ERROR_TYPE_HANDLE, NULL, - "Invalid input action"); + if (ret) goto error; - } if (meta) { *meta = sw_meta_ptr; @@ -1577,6 +1780,9 @@ ice_switch_redirect(struct ice_adapter *ad, uint16_t lkups_cnt; int ret; + if 
(rdata->vsi_handle != rd->vsi_handle) + return 0; + sw = hw->switch_info; if (!sw->recp_list[rdata->rid].recp_created) return -EINVAL; @@ -1588,25 +1794,32 @@ ice_switch_redirect(struct ice_adapter *ad, LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry, list_entry) { rinfo = list_itr->rule_info; - if (rinfo.fltr_rule_id == rdata->rule_id && + if ((rinfo.fltr_rule_id == rdata->rule_id && rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI && - rinfo.sw_act.vsi_handle == rd->vsi_handle) { + rinfo.sw_act.vsi_handle == rd->vsi_handle) || + (rinfo.fltr_rule_id == rdata->rule_id && + rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){ lkups_cnt = list_itr->lkups_cnt; lkups_dp = (struct ice_adv_lkup_elem *) ice_memdup(hw, list_itr->lkups, sizeof(*list_itr->lkups) * lkups_cnt, ICE_NONDMA_TO_NONDMA); + if (!lkups_dp) { PMD_DRV_LOG(ERR, "Failed to allocate memory."); return -EINVAL; } + if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) { + rinfo.sw_act.vsi_handle = rd->vsi_handle; + rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI; + } break; } } if (!lkups_dp) - return 0; + return -EINVAL; /* Remove the old rule */ ret = ice_rem_adv_rule(hw, list_itr->lkups, @@ -1639,7 +1852,7 @@ ice_switch_init(struct ice_adapter *ad) { int ret = 0; struct ice_flow_parser *dist_parser; - struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + struct ice_flow_parser *perm_parser; if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) dist_parser = &ice_switch_dist_parser_comms; @@ -1648,10 +1861,16 @@ ice_switch_init(struct ice_adapter *ad) else return -EINVAL; - if (ad->devargs.pipe_mode_support) + if (ad->devargs.pipe_mode_support) { + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + perm_parser = &ice_switch_perm_parser_comms; + else + perm_parser = &ice_switch_perm_parser_os; + ret = ice_register_parser(perm_parser, ad); - else + } else { ret = ice_register_parser(dist_parser, ad); + } return ret; } @@ -1659,17 +1878,25 @@ static void ice_switch_uninit(struct ice_adapter *ad) { struct ice_flow_parser *dist_parser; - struct ice_flow_parser *perm_parser = &ice_switch_perm_parser; + struct ice_flow_parser *perm_parser; if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) dist_parser = &ice_switch_dist_parser_comms; - else + else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT) dist_parser = &ice_switch_dist_parser_os; + else + return; + + if (ad->devargs.pipe_mode_support) { + if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS) + perm_parser = &ice_switch_perm_parser_comms; + else + perm_parser = &ice_switch_perm_parser_os; - if (ad->devargs.pipe_mode_support) ice_unregister_parser(perm_parser, ad); - else + } else { ice_unregister_parser(dist_parser, ad); + } } static struct @@ -1703,10 +1930,19 @@ ice_flow_parser ice_switch_dist_parser_comms = { }; static struct -ice_flow_parser ice_switch_perm_parser = { +ice_flow_parser ice_switch_perm_parser_os = { + .engine = &ice_switch_engine, + .array = ice_switch_pattern_perm_os, + .array_len = RTE_DIM(ice_switch_pattern_perm_os), + .parse_pattern_action = ice_switch_parse_pattern_action, + .stage = ICE_FLOW_STAGE_PERMISSION, +}; + +static struct +ice_flow_parser ice_switch_perm_parser_comms = { .engine = &ice_switch_engine, - .array = ice_switch_pattern_perm, - .array_len = RTE_DIM(ice_switch_pattern_perm), + .array = ice_switch_pattern_perm_comms, + .array_len = RTE_DIM(ice_switch_pattern_perm_comms), .parse_pattern_action = ice_switch_parse_pattern_action, .stage = ICE_FLOW_STAGE_PERMISSION, };
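Editor's note: the pattern tables above extend the switch filter with PPPoE-over-IPv4/IPv6 TCP/UDP patterns (COMMS DDP package only, per the dist_comms/perm_comms tables), and ice_switch_inset_get() now deduces the tunnel type from the items actually parsed (ICE_SW_TUN_PPPOE_IPV4_UDP and friends) instead of the pre-scan the old ice_switch_parse_pattern_action() performed. What follows is a minimal sketch, not part of the patch, of how an application might exercise one of the new patterns through the public rte_flow API; the port id, queue index, and PPPoE session id are placeholder values for illustration.

/*
 * Sketch: match a specific PPPoE session carrying IPv4/UDP and
 * steer it to a queue. Placeholder values: queue index 2,
 * session id 0x0001.
 */
#include <rte_ethdev.h>
#include <rte_flow.h>
#include <rte_byteorder.h>

static struct rte_flow *
create_pppoe_ipv4_udp_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	struct rte_flow_item_pppoe pppoe_spec = {
		.session_id = RTE_BE16(0x0001), /* placeholder session id */
	};
	struct rte_flow_item_pppoe pppoe_mask = {
		.session_id = RTE_BE16(0xffff), /* match session id exactly */
	};

	/* eth / pppoes / ipv4 / udp — one of the patterns added above */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_PPPOES,
		  .spec = &pppoe_spec, .mask = &pppoe_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Exactly one action: the patch tightens ice_switch_check_action()
	 * from actions_num > 1 to actions_num != 1. */
	struct rte_flow_action_queue queue = { .index = 2 }; /* placeholder */
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	if (rte_flow_validate(port_id, &attr, pattern, actions, err))
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, err);
}

With no pppoe_proto item present, the new post-parse deduction sets the tunnel type to ICE_SW_TUN_PPPOE_IPV4_UDP for this pattern. Note also the new input-set budget: a rule whose accumulated match bytes exceed MAX_INPUT_SET_BYTE (32) is rejected with "too much input set".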
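For DCF, ice_switch_parse_dcf_action() now receives the adapter so it can validate the VF id against real_hw.num_vfs and honor the action's `original` bit; when `original` is set, the rule targets the DCF's own function (real_hw.avf.bus.func) and the id is ignored. A hedged sketch of the corresponding action list, with a placeholder VF id, assuming the rule is created on the DCF port:

/*
 * Sketch: forward matched traffic to VF 1 through the DCF.
 * VF id 1 is a placeholder; it must be below the number of VFs
 * or the new check rejects the rule with "Invalid vf id".
 */
struct rte_flow_action_vf vf = {
	.original = 0,	/* 0: use .id; 1: target the DCF itself */
	.id = 1,	/* placeholder VF id */
};
struct rte_flow_action actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};

The same single-action constraint applies here as in the non-DCF path.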