X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fice%2Fice_switch_filter.c;h=eeed386c63c65a2065b25ac021ddcdb78cfc0561;hb=05b405d581486651305551a9f7295f40388d95db;hp=d9bdf9637efc692c1b9796a9e06206a8002aa96b;hpb=6bc7628c5e0b9c15dd2784bdf45546e8284d6f68;p=dpdk.git diff --git a/drivers/net/ice/ice_switch_filter.c b/drivers/net/ice/ice_switch_filter.c index d9bdf9637e..eeed386c63 100644 --- a/drivers/net/ice/ice_switch_filter.c +++ b/drivers/net/ice/ice_switch_filter.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include #include @@ -23,18 +23,27 @@ #include "ice_logs.h" #include "ice_ethdev.h" #include "ice_generic_flow.h" +#include "ice_dcf_ethdev.h" -#define MAX_QGRP_NUM_TYPE 7 +#define MAX_QGRP_NUM_TYPE 7 +#define MAX_INPUT_SET_BYTE 32 +#define ICE_PPP_IPV4_PROTO 0x0021 +#define ICE_PPP_IPV6_PROTO 0x0057 +#define ICE_IPV4_PROTO_NVGRE 0x002F #define ICE_SW_INSET_ETHER ( \ ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE) #define ICE_SW_INSET_MAC_VLAN ( \ - ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_ETHERTYPE | \ - ICE_INSET_VLAN_OUTER) + ICE_SW_INSET_ETHER | ICE_INSET_VLAN_INNER) +#define ICE_SW_INSET_MAC_QINQ ( \ + ICE_INSET_DMAC | ICE_INSET_SMAC | ICE_INSET_VLAN_INNER | \ + ICE_INSET_VLAN_OUTER) #define ICE_SW_INSET_MAC_IPV4 ( \ ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS) +#define ICE_SW_INSET_MAC_QINQ_IPV4 ( \ + ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV4) #define ICE_SW_INSET_MAC_IPV4_TCP ( \ ICE_INSET_DMAC | ICE_INSET_IPV4_DST | ICE_INSET_IPV4_SRC | \ ICE_INSET_IPV4_TTL | ICE_INSET_IPV4_TOS | \ @@ -47,6 +56,8 @@ ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ ICE_INSET_IPV6_TC | ICE_INSET_IPV6_HOP_LIMIT | \ ICE_INSET_IPV6_NEXT_HDR) +#define ICE_SW_INSET_MAC_QINQ_IPV6 ( \ + ICE_SW_INSET_MAC_QINQ | ICE_SW_INSET_MAC_IPV6) #define ICE_SW_INSET_MAC_IPV6_TCP ( \ ICE_INSET_DMAC | ICE_INSET_IPV6_DST | ICE_INSET_IPV6_SRC | \ ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ @@ -56,38 +67,38 @@ ICE_INSET_IPV6_HOP_LIMIT | ICE_INSET_IPV6_TC | \ ICE_INSET_UDP_DST_PORT | ICE_INSET_UDP_SRC_PORT) #define ICE_SW_INSET_DIST_NVGRE_IPV4 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \ + ICE_INSET_NVGRE_TNI) #define ICE_SW_INSET_DIST_VXLAN_IPV4 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | ICE_INSET_DMAC | \ + ICE_INSET_VXLAN_VNI) #define ICE_SW_INSET_DIST_NVGRE_IPV4_TCP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \ + ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI) #define ICE_SW_INSET_DIST_NVGRE_IPV4_UDP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_NVGRE_TNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \ + ICE_INSET_DMAC | ICE_INSET_NVGRE_TNI) #define ICE_SW_INSET_DIST_VXLAN_IPV4_TCP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_TCP_SRC_PORT | 
ICE_INSET_TUN_TCP_DST_PORT | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI) #define ICE_SW_INSET_DIST_VXLAN_IPV4_UDP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ - ICE_INSET_TUN_DMAC | ICE_INSET_TUN_VXLAN_VNI | ICE_INSET_IPV4_DST) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \ + ICE_INSET_DMAC | ICE_INSET_VXLAN_VNI) #define ICE_SW_INSET_PERM_TUNNEL_IPV4 ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_IPV4_PROTO | ICE_INSET_TUN_IPV4_TOS) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_IPV4_PROTO | ICE_INSET_IPV4_TOS) #define ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_TCP_SRC_PORT | ICE_INSET_TUN_TCP_DST_PORT | \ - ICE_INSET_TUN_IPV4_TOS) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_TCP_SRC_PORT | ICE_INSET_TCP_DST_PORT | \ + ICE_INSET_IPV4_TOS) #define ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP ( \ - ICE_INSET_TUN_IPV4_SRC | ICE_INSET_TUN_IPV4_DST | \ - ICE_INSET_TUN_UDP_SRC_PORT | ICE_INSET_TUN_UDP_DST_PORT | \ - ICE_INSET_TUN_IPV4_TOS) + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST | \ + ICE_INSET_UDP_SRC_PORT | ICE_INSET_UDP_DST_PORT | \ + ICE_INSET_IPV4_TOS) #define ICE_SW_INSET_MAC_PPPOE ( \ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION) @@ -95,6 +106,60 @@ ICE_INSET_VLAN_OUTER | ICE_INSET_VLAN_INNER | \ ICE_INSET_DMAC | ICE_INSET_ETHERTYPE | ICE_INSET_PPPOE_SESSION | \ ICE_INSET_PPPOE_PROTO) +#define ICE_SW_INSET_MAC_PPPOE_IPV4 ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4) +#define ICE_SW_INSET_MAC_PPPOE_IPV4_TCP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_TCP) +#define ICE_SW_INSET_MAC_PPPOE_IPV4_UDP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV4_UDP) +#define ICE_SW_INSET_MAC_PPPOE_IPV6 ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6) +#define ICE_SW_INSET_MAC_PPPOE_IPV6_TCP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_TCP) +#define ICE_SW_INSET_MAC_PPPOE_IPV6_UDP ( \ + ICE_SW_INSET_MAC_PPPOE | ICE_SW_INSET_MAC_IPV6_UDP) +#define ICE_SW_INSET_MAC_IPV4_ESP ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_ESP_SPI) +#define ICE_SW_INSET_MAC_IPV6_ESP ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_ESP_SPI) +#define ICE_SW_INSET_MAC_IPV4_AH ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_AH_SPI) +#define ICE_SW_INSET_MAC_IPV6_AH ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_AH_SPI) +#define ICE_SW_INSET_MAC_IPV4_L2TP ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_L2TPV3OIP_SESSION_ID) +#define ICE_SW_INSET_MAC_IPV6_L2TP ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_L2TPV3OIP_SESSION_ID) +#define ICE_SW_INSET_MAC_IPV4_PFCP ( \ + ICE_SW_INSET_MAC_IPV4 | \ + ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID) +#define ICE_SW_INSET_MAC_IPV6_PFCP ( \ + ICE_SW_INSET_MAC_IPV6 | \ + ICE_INSET_PFCP_S_FIELD | ICE_INSET_PFCP_SEID) +#define ICE_SW_INSET_MAC_IPV4_GTPU ( \ + ICE_SW_INSET_MAC_IPV4 | ICE_INSET_GTPU_TEID) +#define ICE_SW_INSET_MAC_IPV6_GTPU ( \ + ICE_SW_INSET_MAC_IPV6 | ICE_INSET_GTPU_TEID) +#define ICE_SW_INSET_MAC_GTPU_OUTER ( \ + ICE_INSET_DMAC | ICE_INSET_GTPU_TEID) +#define ICE_SW_INSET_MAC_GTPU_EH_OUTER ( \ + ICE_SW_INSET_MAC_GTPU_OUTER | ICE_INSET_GTPU_QFI) +#define ICE_SW_INSET_GTPU_IPV4 ( \ + ICE_INSET_IPV4_SRC | ICE_INSET_IPV4_DST) +#define 
ICE_SW_INSET_GTPU_IPV6 ( \ + ICE_INSET_IPV6_SRC | ICE_INSET_IPV6_DST) +#define ICE_SW_INSET_GTPU_IPV4_UDP ( \ + ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_UDP_SRC_PORT | \ + ICE_INSET_UDP_DST_PORT) +#define ICE_SW_INSET_GTPU_IPV4_TCP ( \ + ICE_SW_INSET_GTPU_IPV4 | ICE_INSET_TCP_SRC_PORT | \ + ICE_INSET_TCP_DST_PORT) +#define ICE_SW_INSET_GTPU_IPV6_UDP ( \ + ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_UDP_SRC_PORT | \ + ICE_INSET_UDP_DST_PORT) +#define ICE_SW_INSET_GTPU_IPV6_TCP ( \ + ICE_SW_INSET_GTPU_IPV6 | ICE_INSET_TCP_SRC_PORT | \ + ICE_INSET_TCP_DST_PORT) struct sw_meta { struct ice_adv_lkup_elem *list; @@ -102,116 +167,165 @@ struct sw_meta { struct ice_adv_rule_info rule_info; }; -static struct ice_flow_parser ice_switch_dist_parser_os; -static struct ice_flow_parser ice_switch_dist_parser_comms; +static struct ice_flow_parser ice_switch_dist_parser; static struct ice_flow_parser ice_switch_perm_parser; static struct -ice_pattern_match_item ice_switch_pattern_dist_comms[] = { - {pattern_ethertype, - ICE_SW_INSET_ETHER, ICE_INSET_NONE}, - {pattern_ethertype_vlan, - ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, - {pattern_eth_ipv4, - ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp, - ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_tcp, - ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6, - ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp, - ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_tcp, - ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4, - ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, - ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, - ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4, - ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_udp, - ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, - ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoed, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_pppoes, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes, - ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE}, - {pattern_eth_pppoes_proto, - ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, - {pattern_eth_vlan_pppoes_proto, - ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE}, +ice_pattern_match_item ice_switch_pattern_dist_list[] = { + {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_IPV4_DST, 
ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_IPV4_DST, ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, 
ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, }; static struct -ice_pattern_match_item ice_switch_pattern_dist_os[] = { - {pattern_ethertype, - ICE_SW_INSET_ETHER, ICE_INSET_NONE}, - {pattern_ethertype_vlan, - ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, - {pattern_eth_arp, - ICE_INSET_NONE, ICE_INSET_NONE}, - {pattern_eth_ipv4, - ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp, - ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_tcp, - ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6, - ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp, - ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_tcp, - ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4, - ICE_SW_INSET_DIST_VXLAN_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, - 
ICE_SW_INSET_DIST_VXLAN_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, - ICE_SW_INSET_DIST_VXLAN_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4, - ICE_SW_INSET_DIST_NVGRE_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_udp, - ICE_SW_INSET_DIST_NVGRE_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, - ICE_SW_INSET_DIST_NVGRE_IPV4_TCP, ICE_INSET_NONE}, -}; - -static struct -ice_pattern_match_item ice_switch_pattern_perm[] = { - {pattern_ethertype_vlan, - ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE}, - {pattern_eth_ipv4, - ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp, - ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_tcp, - ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv6, - ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE}, - {pattern_eth_ipv6_udp, - ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv6_tcp, - ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4, - ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, - ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, - ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4, - ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_udp, - ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, - {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, - ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, +ice_pattern_match_item ice_switch_pattern_perm_list[] = { + {pattern_ethertype, ICE_SW_INSET_ETHER, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_vlan, ICE_SW_INSET_MAC_VLAN, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_ethertype_qinq, ICE_SW_INSET_MAC_QINQ, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_arp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4, ICE_SW_INSET_MAC_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp, ICE_SW_INSET_MAC_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_tcp, ICE_SW_INSET_MAC_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6, ICE_SW_INSET_MAC_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp, ICE_SW_INSET_MAC_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_tcp, ICE_SW_INSET_MAC_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_vxlan_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_udp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_nvgre_eth_ipv4_tcp, ICE_INSET_NONE, ICE_SW_INSET_PERM_TUNNEL_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, 
ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_tcp, ICE_SW_INSET_MAC_PPPOE_IPV4_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv4_udp, ICE_SW_INSET_MAC_PPPOE_IPV4_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_tcp, ICE_SW_INSET_MAC_PPPOE_IPV6_TCP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_vlan_pppoes_ipv6_udp, ICE_SW_INSET_MAC_PPPOE_IPV6_UDP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_udp_esp, ICE_SW_INSET_MAC_IPV4_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_esp, ICE_SW_INSET_MAC_IPV6_ESP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_ah, ICE_SW_INSET_MAC_IPV4_AH, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_ah, ICE_SW_INSET_MAC_IPV6_AH, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_udp_ah, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_l2tp, ICE_SW_INSET_MAC_IPV4_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_l2tp, ICE_SW_INSET_MAC_IPV6_L2TP, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_pfcp, ICE_INSET_NONE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv4, ICE_SW_INSET_MAC_QINQ_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_ipv6, ICE_SW_INSET_MAC_QINQ_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes, ICE_SW_INSET_MAC_PPPOE, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_proto, ICE_SW_INSET_MAC_PPPOE_PROTO, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv4, ICE_SW_INSET_MAC_PPPOE_IPV4, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_qinq_pppoes_ipv6, ICE_SW_INSET_MAC_PPPOE_IPV6, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu, ICE_SW_INSET_MAC_IPV4_GTPU, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu, ICE_SW_INSET_MAC_IPV6_GTPU, ICE_INSET_NONE, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6_udp, 
ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv4_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv4_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV4_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6_udp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6_udp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_UDP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, + {pattern_eth_ipv6_gtpu_eh_ipv6_tcp, ICE_SW_INSET_MAC_GTPU_EH_OUTER, ICE_SW_INSET_GTPU_IPV6_TCP, ICE_INSET_NONE}, }; static int @@ -314,12 +428,13 @@ ice_switch_filter_rule_free(struct rte_flow *flow) rte_free(flow->rule); } -static uint64_t -ice_switch_inset_get(const struct rte_flow_item pattern[], +static bool +ice_switch_parse_pattern(const struct rte_flow_item pattern[], struct rte_flow_error *error, struct ice_adv_lkup_elem *list, uint16_t *lkups_num, - enum ice_sw_tunnel_type tun_type) + enum ice_sw_tunnel_type *tun_type, + const struct ice_pattern_match_item *pattern_match_item) { const struct rte_flow_item *item = pattern; enum rte_flow_item_type item_type; @@ -335,11 +450,41 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], const struct rte_flow_item_pppoe *pppoe_spec, *pppoe_mask; const struct rte_flow_item_pppoe_proto_id *pppoe_proto_spec, *pppoe_proto_mask; - uint64_t input_set = ICE_INSET_NONE; - uint16_t j, t = 0; - uint16_t tunnel_valid = 0; - uint16_t pppoe_valid = 0; - + const struct rte_flow_item_esp *esp_spec, *esp_mask; + const struct rte_flow_item_ah *ah_spec, *ah_mask; + const struct rte_flow_item_l2tpv3oip *l2tp_spec, *l2tp_mask; + const struct rte_flow_item_pfcp *pfcp_spec, *pfcp_mask; + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask; + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask; + uint64_t outer_input_set = ICE_INSET_NONE; + uint64_t inner_input_set = ICE_INSET_NONE; + uint64_t *input = NULL; + uint16_t input_set_byte = 0; + bool pppoe_elem_valid = 0; + bool pppoe_patt_valid = 0; + bool pppoe_prot_valid = 0; + bool inner_vlan_valid = 0; + bool outer_vlan_valid = 0; + bool tunnel_valid = 0; + bool profile_rule = 0; + bool nvgre_valid = 0; + bool vxlan_valid = 0; + bool qinq_valid = 0; + bool ipv6_valid = 0; + bool ipv4_valid = 0; + bool udp_valid = 0; + bool tcp_valid = 0; + bool gtpu_valid = 0; + bool gtpu_psc_valid = 0; + bool inner_ipv4_valid = 
0; + bool inner_ipv6_valid = 0; + bool inner_tcp_valid = 0; + bool inner_udp_valid = 0; + uint16_t j, k, t = 0; + + if (*tun_type == ICE_SW_TUN_AND_NON_TUN_QINQ || + *tun_type == ICE_NON_TUN_QINQ) + qinq_valid = 1; for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { @@ -348,7 +493,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Not support range"); - return 0; + return false; } item_type = item->type; @@ -359,30 +504,24 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], if (eth_spec && eth_mask) { const uint8_t *a = eth_mask->src.addr_bytes; const uint8_t *b = eth_mask->dst.addr_bytes; + if (tunnel_valid) + input = &inner_input_set; + else + input = &outer_input_set; for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { - if (a[j] && tunnel_valid) { - input_set |= - ICE_INSET_TUN_SMAC; - break; - } else if (a[j]) { - input_set |= - ICE_INSET_SMAC; + if (a[j]) { + *input |= ICE_INSET_SMAC; break; } } for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) { - if (b[j] && tunnel_valid) { - input_set |= - ICE_INSET_TUN_DMAC; - break; - } else if (b[j]) { - input_set |= - ICE_INSET_DMAC; + if (b[j]) { + *input |= ICE_INSET_DMAC; break; } } if (eth_mask->type) - input_set |= ICE_INSET_ETHERTYPE; + *input |= ICE_INSET_ETHERTYPE; list[t].type = (tunnel_valid == 0) ? ICE_MAC_OFOS : ICE_MAC_IL; struct ice_ether_hdr *h; @@ -397,6 +536,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], m->src_addr[j] = eth_mask->src.addr_bytes[j]; i = 1; + input_set_byte++; } if (eth_mask->dst.addr_bytes[j]) { h->dst_addr[j] = @@ -404,6 +544,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], m->dst_addr[j] = eth_mask->dst.addr_bytes[j]; i = 1; + input_set_byte++; } } if (i) @@ -414,6 +555,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], eth_spec->type; list[t].m_u.ethertype.ethtype_id = eth_mask->type; + input_set_byte += 2; t++; } } @@ -422,6 +564,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV4: ipv4_spec = item->spec; ipv4_mask = item->mask; + if (tunnel_valid) { + inner_ipv4_valid = 1; + input = &inner_input_set; + } else { + ipv4_valid = 1; + input = &outer_input_set; + } + if (ipv4_spec && ipv4_mask) { /* Check IPv4 mask and update input set */ if (ipv4_mask->hdr.version_ihl || @@ -432,39 +582,20 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid IPv4 mask."); - return 0; + return false; } - if (tunnel_valid) { - if (ipv4_mask->hdr.type_of_service) - input_set |= - ICE_INSET_TUN_IPV4_TOS; - if (ipv4_mask->hdr.src_addr) - input_set |= - ICE_INSET_TUN_IPV4_SRC; - if (ipv4_mask->hdr.dst_addr) - input_set |= - ICE_INSET_TUN_IPV4_DST; - if (ipv4_mask->hdr.time_to_live) - input_set |= - ICE_INSET_TUN_IPV4_TTL; - if (ipv4_mask->hdr.next_proto_id) - input_set |= - ICE_INSET_TUN_IPV4_PROTO; - } else { - if (ipv4_mask->hdr.src_addr) - input_set |= ICE_INSET_IPV4_SRC; - if (ipv4_mask->hdr.dst_addr) - input_set |= ICE_INSET_IPV4_DST; - if (ipv4_mask->hdr.time_to_live) - input_set |= ICE_INSET_IPV4_TTL; - if (ipv4_mask->hdr.next_proto_id) - input_set |= - ICE_INSET_IPV4_PROTO; - if (ipv4_mask->hdr.type_of_service) - input_set |= - ICE_INSET_IPV4_TOS; - } + if (ipv4_mask->hdr.src_addr) + *input |= ICE_INSET_IPV4_SRC; + if (ipv4_mask->hdr.dst_addr) + *input |= ICE_INSET_IPV4_DST; + if (ipv4_mask->hdr.time_to_live) + *input |= ICE_INSET_IPV4_TTL; + if (ipv4_mask->hdr.next_proto_id) + *input |= ICE_INSET_IPV4_PROTO; + if 
(ipv4_mask->hdr.type_of_service) + *input |= ICE_INSET_IPV4_TOS; + list[t].type = (tunnel_valid == 0) ? ICE_IPV4_OFOS : ICE_IPV4_IL; if (ipv4_mask->hdr.src_addr) { @@ -472,30 +603,39 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv4_spec->hdr.src_addr; list[t].m_u.ipv4_hdr.src_addr = ipv4_mask->hdr.src_addr; + input_set_byte += 2; } if (ipv4_mask->hdr.dst_addr) { list[t].h_u.ipv4_hdr.dst_addr = ipv4_spec->hdr.dst_addr; list[t].m_u.ipv4_hdr.dst_addr = ipv4_mask->hdr.dst_addr; + input_set_byte += 2; } if (ipv4_mask->hdr.time_to_live) { list[t].h_u.ipv4_hdr.time_to_live = ipv4_spec->hdr.time_to_live; list[t].m_u.ipv4_hdr.time_to_live = ipv4_mask->hdr.time_to_live; + input_set_byte++; } if (ipv4_mask->hdr.next_proto_id) { list[t].h_u.ipv4_hdr.protocol = ipv4_spec->hdr.next_proto_id; list[t].m_u.ipv4_hdr.protocol = ipv4_mask->hdr.next_proto_id; + input_set_byte++; } + if ((ipv4_spec->hdr.next_proto_id & + ipv4_mask->hdr.next_proto_id) == + ICE_IPV4_PROTO_NVGRE) + *tun_type = ICE_SW_TUN_AND_NON_TUN; if (ipv4_mask->hdr.type_of_service) { list[t].h_u.ipv4_hdr.tos = ipv4_spec->hdr.type_of_service; list[t].m_u.ipv4_hdr.tos = ipv4_mask->hdr.type_of_service; + input_set_byte++; } t++; } @@ -504,61 +644,42 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_IPV6: ipv6_spec = item->spec; ipv6_mask = item->mask; + if (tunnel_valid) { + inner_ipv6_valid = 1; + input = &inner_input_set; + } else { + ipv6_valid = 1; + input = &outer_input_set; + } + if (ipv6_spec && ipv6_mask) { if (ipv6_mask->hdr.payload_len) { rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid IPv6 mask"); - return 0; + return false; } for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { - if (ipv6_mask->hdr.src_addr[j] && - tunnel_valid) { - input_set |= - ICE_INSET_TUN_IPV6_SRC; - break; - } else if (ipv6_mask->hdr.src_addr[j]) { - input_set |= ICE_INSET_IPV6_SRC; + if (ipv6_mask->hdr.src_addr[j]) { + *input |= ICE_INSET_IPV6_SRC; break; } } for (j = 0; j < ICE_IPV6_ADDR_LENGTH; j++) { - if (ipv6_mask->hdr.dst_addr[j] && - tunnel_valid) { - input_set |= - ICE_INSET_TUN_IPV6_DST; - break; - } else if (ipv6_mask->hdr.dst_addr[j]) { - input_set |= ICE_INSET_IPV6_DST; + if (ipv6_mask->hdr.dst_addr[j]) { + *input |= ICE_INSET_IPV6_DST; break; } } - if (ipv6_mask->hdr.proto && - tunnel_valid) - input_set |= - ICE_INSET_TUN_IPV6_NEXT_HDR; - else if (ipv6_mask->hdr.proto) - input_set |= - ICE_INSET_IPV6_NEXT_HDR; - if (ipv6_mask->hdr.hop_limits && - tunnel_valid) - input_set |= - ICE_INSET_TUN_IPV6_HOP_LIMIT; - else if (ipv6_mask->hdr.hop_limits) - input_set |= - ICE_INSET_IPV6_HOP_LIMIT; - if ((ipv6_mask->hdr.vtc_flow & - rte_cpu_to_be_32 - (RTE_IPV6_HDR_TC_MASK)) && - tunnel_valid) - input_set |= - ICE_INSET_TUN_IPV6_TC; - else if (ipv6_mask->hdr.vtc_flow & - rte_cpu_to_be_32 - (RTE_IPV6_HDR_TC_MASK)) - input_set |= ICE_INSET_IPV6_TC; + if (ipv6_mask->hdr.proto) + *input |= ICE_INSET_IPV6_NEXT_HDR; + if (ipv6_mask->hdr.hop_limits) + *input |= ICE_INSET_IPV6_HOP_LIMIT; + if (ipv6_mask->hdr.vtc_flow & + rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) + *input |= ICE_INSET_IPV6_TC; list[t].type = (tunnel_valid == 0) ? 
ICE_IPV6_OFOS : ICE_IPV6_IL; @@ -572,12 +693,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv6_spec->hdr.src_addr[j]; s->src_addr[j] = ipv6_mask->hdr.src_addr[j]; + input_set_byte++; } if (ipv6_mask->hdr.dst_addr[j]) { f->dst_addr[j] = ipv6_spec->hdr.dst_addr[j]; s->dst_addr[j] = ipv6_mask->hdr.dst_addr[j]; + input_set_byte++; } } if (ipv6_mask->hdr.proto) { @@ -585,12 +708,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], ipv6_spec->hdr.proto; s->next_hdr = ipv6_mask->hdr.proto; + input_set_byte++; } if (ipv6_mask->hdr.hop_limits) { f->hop_limit = ipv6_spec->hdr.hop_limits; s->hop_limit = ipv6_mask->hdr.hop_limits; + input_set_byte++; } if (ipv6_mask->hdr.vtc_flow & rte_cpu_to_be_32 @@ -608,6 +733,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT; s->be_ver_tc_flow = CPU_TO_BE32(vtf.u.val); + input_set_byte += 4; } t++; } @@ -616,6 +742,14 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], case RTE_FLOW_ITEM_TYPE_UDP: udp_spec = item->spec; udp_mask = item->mask; + if (tunnel_valid) { + inner_udp_valid = 1; + input = &inner_input_set; + } else { + udp_valid = 1; + input = &outer_input_set; + } + if (udp_spec && udp_mask) { /* Check UDP mask and update input set*/ if (udp_mask->hdr.dgram_len || @@ -624,25 +758,15 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid UDP mask"); - return 0; + return false; } - if (tunnel_valid) { - if (udp_mask->hdr.src_port) - input_set |= - ICE_INSET_TUN_UDP_SRC_PORT; - if (udp_mask->hdr.dst_port) - input_set |= - ICE_INSET_TUN_UDP_DST_PORT; - } else { - if (udp_mask->hdr.src_port) - input_set |= - ICE_INSET_UDP_SRC_PORT; - if (udp_mask->hdr.dst_port) - input_set |= - ICE_INSET_UDP_DST_PORT; - } - if (tun_type == ICE_SW_TUN_VXLAN && + if (udp_mask->hdr.src_port) + *input |= ICE_INSET_UDP_SRC_PORT; + if (udp_mask->hdr.dst_port) + *input |= ICE_INSET_UDP_DST_PORT; + + if (*tun_type == ICE_SW_TUN_VXLAN && tunnel_valid == 0) list[t].type = ICE_UDP_OF; else @@ -652,20 +776,30 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], udp_spec->hdr.src_port; list[t].m_u.l4_hdr.src_port = udp_mask->hdr.src_port; + input_set_byte += 2; } if (udp_mask->hdr.dst_port) { list[t].h_u.l4_hdr.dst_port = udp_spec->hdr.dst_port; list[t].m_u.l4_hdr.dst_port = udp_mask->hdr.dst_port; + input_set_byte += 2; } - t++; + t++; } break; case RTE_FLOW_ITEM_TYPE_TCP: tcp_spec = item->spec; tcp_mask = item->mask; + if (tunnel_valid) { + inner_tcp_valid = 1; + input = &inner_input_set; + } else { + tcp_valid = 1; + input = &outer_input_set; + } + if (tcp_spec && tcp_mask) { /* Check TCP mask and update input set */ if (tcp_mask->hdr.sent_seq || @@ -679,36 +813,27 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid TCP mask"); - return 0; + return false; } - if (tunnel_valid) { - if (tcp_mask->hdr.src_port) - input_set |= - ICE_INSET_TUN_TCP_SRC_PORT; - if (tcp_mask->hdr.dst_port) - input_set |= - ICE_INSET_TUN_TCP_DST_PORT; - } else { - if (tcp_mask->hdr.src_port) - input_set |= - ICE_INSET_TCP_SRC_PORT; - if (tcp_mask->hdr.dst_port) - input_set |= - ICE_INSET_TCP_DST_PORT; - } + if (tcp_mask->hdr.src_port) + *input |= ICE_INSET_TCP_SRC_PORT; + if (tcp_mask->hdr.dst_port) + *input |= ICE_INSET_TCP_DST_PORT; list[t].type = ICE_TCP_IL; if (tcp_mask->hdr.src_port) { list[t].h_u.l4_hdr.src_port = tcp_spec->hdr.src_port; list[t].m_u.l4_hdr.src_port = tcp_mask->hdr.src_port; + 
input_set_byte += 2; } if (tcp_mask->hdr.dst_port) { list[t].h_u.l4_hdr.dst_port = tcp_spec->hdr.dst_port; list[t].m_u.l4_hdr.dst_port = tcp_mask->hdr.dst_port; + input_set_byte += 2; } t++; } @@ -724,36 +849,32 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid SCTP mask"); - return 0; + return false; } + if (tunnel_valid) + input = &inner_input_set; + else + input = &outer_input_set; + + if (sctp_mask->hdr.src_port) + *input |= ICE_INSET_SCTP_SRC_PORT; + if (sctp_mask->hdr.dst_port) + *input |= ICE_INSET_SCTP_DST_PORT; - if (tunnel_valid) { - if (sctp_mask->hdr.src_port) - input_set |= - ICE_INSET_TUN_SCTP_SRC_PORT; - if (sctp_mask->hdr.dst_port) - input_set |= - ICE_INSET_TUN_SCTP_DST_PORT; - } else { - if (sctp_mask->hdr.src_port) - input_set |= - ICE_INSET_SCTP_SRC_PORT; - if (sctp_mask->hdr.dst_port) - input_set |= - ICE_INSET_SCTP_DST_PORT; - } list[t].type = ICE_SCTP_IL; if (sctp_mask->hdr.src_port) { list[t].h_u.sctp_hdr.src_port = sctp_spec->hdr.src_port; list[t].m_u.sctp_hdr.src_port = sctp_mask->hdr.src_port; + input_set_byte += 2; } if (sctp_mask->hdr.dst_port) { list[t].h_u.sctp_hdr.dst_port = sctp_spec->hdr.dst_port; list[t].m_u.sctp_hdr.dst_port = sctp_mask->hdr.dst_port; + input_set_byte += 2; } t++; } @@ -772,10 +893,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid VXLAN item"); - return 0; + return false; } - + vxlan_valid = 1; tunnel_valid = 1; + input = &inner_input_set; if (vxlan_spec && vxlan_mask) { list[t].type = ICE_VXLAN; if (vxlan_mask->vni[0] || @@ -789,8 +911,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], (vxlan_mask->vni[2] << 16) | (vxlan_mask->vni[1] << 8) | vxlan_mask->vni[0]; - input_set |= - ICE_INSET_TUN_VXLAN_VNI; + *input |= ICE_INSET_VXLAN_VNI; + input_set_byte += 2; } t++; } @@ -809,9 +931,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid NVGRE item"); - return 0; + return false; } + nvgre_valid = 1; tunnel_valid = 1; + input = &inner_input_set; if (nvgre_spec && nvgre_mask) { list[t].type = ICE_NVGRE; if (nvgre_mask->tni[0] || @@ -825,8 +949,8 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], (nvgre_mask->tni[2] << 16) | (nvgre_mask->tni[1] << 8) | nvgre_mask->tni[0]; - input_set |= - ICE_INSET_TUN_NVGRE_TNI; + *input |= ICE_INSET_NVGRE_TNI; + input_set_byte += 2; } t++; } @@ -845,23 +969,47 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid VLAN item"); - return 0; + return false; } + + if (qinq_valid) { + if (!outer_vlan_valid) + outer_vlan_valid = 1; + else + inner_vlan_valid = 1; + } + + input = &outer_input_set; + if (vlan_spec && vlan_mask) { - list[t].type = ICE_VLAN_OFOS; + if (qinq_valid) { + if (!inner_vlan_valid) { + list[t].type = ICE_VLAN_EX; + *input |= + ICE_INSET_VLAN_OUTER; + } else { + list[t].type = ICE_VLAN_IN; + *input |= + ICE_INSET_VLAN_INNER; + } + } else { + list[t].type = ICE_VLAN_OFOS; + *input |= ICE_INSET_VLAN_INNER; + } + if (vlan_mask->tci) { list[t].h_u.vlan_hdr.vlan = vlan_spec->tci; list[t].m_u.vlan_hdr.vlan = vlan_mask->tci; - input_set |= ICE_INSET_VLAN_OUTER; + input_set_byte += 2; } if (vlan_mask->inner_type) { - list[t].h_u.vlan_hdr.type = - vlan_spec->inner_type; - list[t].m_u.vlan_hdr.type = - vlan_mask->inner_type; - input_set |= ICE_INSET_VLAN_OUTER; + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid VLAN input set."); + return 
false; } t++; } @@ -881,8 +1029,10 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid pppoe item"); - return 0; + return false; } + pppoe_patt_valid = 1; + input = &outer_input_set; if (pppoe_spec && pppoe_mask) { /* Check pppoe mask and update input set */ if (pppoe_mask->length || @@ -892,7 +1042,7 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid pppoe mask"); - return 0; + return false; } list[t].type = ICE_PPPOE; if (pppoe_mask->session_id) { @@ -900,10 +1050,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], pppoe_spec->session_id; list[t].m_u.pppoe_hdr.session_id = pppoe_mask->session_id; - input_set |= ICE_INSET_PPPOE_SESSION; + *input |= ICE_INSET_PPPOE_SESSION; + input_set_byte += 2; } t++; - pppoe_valid = 1; + pppoe_elem_valid = 1; } break; @@ -921,10 +1072,11 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], RTE_FLOW_ERROR_TYPE_ITEM, item, "Invalid pppoe proto item"); - return 0; + return false; } + input = &outer_input_set; if (pppoe_proto_spec && pppoe_proto_mask) { - if (pppoe_valid) + if (pppoe_elem_valid) t--; list[t].type = ICE_PPPOE; if (pppoe_proto_mask->proto_id) { @@ -932,10 +1084,291 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], pppoe_proto_spec->proto_id; list[t].m_u.pppoe_hdr.ppp_prot_id = pppoe_proto_mask->proto_id; - input_set |= ICE_INSET_PPPOE_PROTO; + *input |= ICE_INSET_PPPOE_PROTO; + input_set_byte += 2; + pppoe_prot_valid = 1; } + if ((pppoe_proto_mask->proto_id & + pppoe_proto_spec->proto_id) != + CPU_TO_BE16(ICE_PPP_IPV4_PROTO) && + (pppoe_proto_mask->proto_id & + pppoe_proto_spec->proto_id) != + CPU_TO_BE16(ICE_PPP_IPV6_PROTO)) + *tun_type = ICE_SW_TUN_PPPOE_PAY; + else + *tun_type = ICE_SW_TUN_PPPOE; t++; } + + break; + + case RTE_FLOW_ITEM_TYPE_ESP: + esp_spec = item->spec; + esp_mask = item->mask; + if ((esp_spec && !esp_mask) || + (!esp_spec && esp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid esp item"); + return false; + } + /* Check esp mask and update input set */ + if (esp_mask && esp_mask->hdr.seq) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid esp mask"); + return false; + } + input = &outer_input_set; + if (!esp_spec && !esp_mask && !(*input)) { + profile_rule = 1; + if (ipv6_valid && udp_valid) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_NAT_T; + else if (ipv6_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV6_ESP; + else if (ipv4_valid) + goto inset_check; + } else if (esp_spec && esp_mask && + esp_mask->hdr.spi){ + if (udp_valid) + list[t].type = ICE_NAT_T; + else + list[t].type = ICE_ESP; + list[t].h_u.esp_hdr.spi = + esp_spec->hdr.spi; + list[t].m_u.esp_hdr.spi = + esp_mask->hdr.spi; + *input |= ICE_INSET_ESP_SPI; + input_set_byte += 4; + t++; + } + + if (!profile_rule) { + if (ipv6_valid && udp_valid) + *tun_type = ICE_SW_TUN_IPV6_NAT_T; + else if (ipv4_valid && udp_valid) + *tun_type = ICE_SW_TUN_IPV4_NAT_T; + else if (ipv6_valid) + *tun_type = ICE_SW_TUN_IPV6_ESP; + else if (ipv4_valid) + *tun_type = ICE_SW_TUN_IPV4_ESP; + } + break; + + case RTE_FLOW_ITEM_TYPE_AH: + ah_spec = item->spec; + ah_mask = item->mask; + if ((ah_spec && !ah_mask) || + (!ah_spec && ah_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ah item"); + return false; + } + /* Check ah mask and update input set */ + if (ah_mask && + (ah_mask->next_hdr || + ah_mask->payload_len || + ah_mask->seq_num || + 
ah_mask->reserved)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid ah mask"); + return false; + } + + input = &outer_input_set; + if (!ah_spec && !ah_mask && !(*input)) { + profile_rule = 1; + if (ipv6_valid && udp_valid) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_NAT_T; + else if (ipv6_valid) + *tun_type = ICE_SW_TUN_PROFID_IPV6_AH; + else if (ipv4_valid) + goto inset_check; + } else if (ah_spec && ah_mask && + ah_mask->spi){ + list[t].type = ICE_AH; + list[t].h_u.ah_hdr.spi = + ah_spec->spi; + list[t].m_u.ah_hdr.spi = + ah_mask->spi; + *input |= ICE_INSET_AH_SPI; + input_set_byte += 4; + t++; + } + + if (!profile_rule) { + if (udp_valid) + goto inset_check; + else if (ipv6_valid) + *tun_type = ICE_SW_TUN_IPV6_AH; + else if (ipv4_valid) + *tun_type = ICE_SW_TUN_IPV4_AH; + } + break; + + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP: + l2tp_spec = item->spec; + l2tp_mask = item->mask; + if ((l2tp_spec && !l2tp_mask) || + (!l2tp_spec && l2tp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid l2tp item"); + return false; + } + + input = &outer_input_set; + if (!l2tp_spec && !l2tp_mask && !(*input)) { + if (ipv6_valid) + *tun_type = + ICE_SW_TUN_PROFID_MAC_IPV6_L2TPV3; + else if (ipv4_valid) + goto inset_check; + } else if (l2tp_spec && l2tp_mask && + l2tp_mask->session_id){ + list[t].type = ICE_L2TPV3; + list[t].h_u.l2tpv3_sess_hdr.session_id = + l2tp_spec->session_id; + list[t].m_u.l2tpv3_sess_hdr.session_id = + l2tp_mask->session_id; + *input |= ICE_INSET_L2TPV3OIP_SESSION_ID; + input_set_byte += 4; + t++; + } + + if (!profile_rule) { + if (ipv6_valid) + *tun_type = + ICE_SW_TUN_IPV6_L2TPV3; + else if (ipv4_valid) + *tun_type = + ICE_SW_TUN_IPV4_L2TPV3; + } + break; + + case RTE_FLOW_ITEM_TYPE_PFCP: + pfcp_spec = item->spec; + pfcp_mask = item->mask; + /* Check if PFCP item is used to describe protocol. + * If yes, both spec and mask should be NULL. + * If no, both spec and mask shouldn't be NULL. 
+ */ + if ((!pfcp_spec && pfcp_mask) || + (pfcp_spec && !pfcp_mask)) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid PFCP item"); + return false; + } + if (pfcp_spec && pfcp_mask) { + /* Check pfcp mask and update input set */ + if (pfcp_mask->msg_type || + pfcp_mask->msg_len || + pfcp_mask->seid) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid pfcp mask"); + return false; + } + if (pfcp_mask->s_field && + pfcp_spec->s_field == 0x01 && + ipv6_valid) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_PFCP_SESSION; + else if (pfcp_mask->s_field && + pfcp_spec->s_field == 0x01) + *tun_type = + ICE_SW_TUN_PROFID_IPV4_PFCP_SESSION; + else if (pfcp_mask->s_field && + !pfcp_spec->s_field && + ipv6_valid) + *tun_type = + ICE_SW_TUN_PROFID_IPV6_PFCP_NODE; + else if (pfcp_mask->s_field && + !pfcp_spec->s_field) + *tun_type = + ICE_SW_TUN_PROFID_IPV4_PFCP_NODE; + else + return false; + } + break; + + case RTE_FLOW_ITEM_TYPE_GTPU: + gtp_spec = item->spec; + gtp_mask = item->mask; + if (gtp_spec && !gtp_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP item"); + return false; + } + if (gtp_spec && gtp_mask) { + if (gtp_mask->v_pt_rsv_flags || + gtp_mask->msg_type || + gtp_mask->msg_len) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTP mask"); + return false; + } + input = &outer_input_set; + if (gtp_mask->teid) + *input |= ICE_INSET_GTPU_TEID; + list[t].type = ICE_GTP; + list[t].h_u.gtp_hdr.teid = + gtp_spec->teid; + list[t].m_u.gtp_hdr.teid = + gtp_mask->teid; + input_set_byte += 4; + t++; + } + tunnel_valid = 1; + gtpu_valid = 1; + break; + + case RTE_FLOW_ITEM_TYPE_GTP_PSC: + gtp_psc_spec = item->spec; + gtp_psc_mask = item->mask; + if (gtp_psc_spec && !gtp_psc_mask) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTPU_EH item"); + return false; + } + if (gtp_psc_spec && gtp_psc_mask) { + if (gtp_psc_mask->pdu_type) { + rte_flow_error_set(error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "Invalid GTPU_EH mask"); + return false; + } + input = &outer_input_set; + if (gtp_psc_mask->qfi) + *input |= ICE_INSET_GTPU_QFI; + list[t].type = ICE_GTP; + list[t].h_u.gtp_hdr.qfi = + gtp_psc_spec->qfi; + list[t].m_u.gtp_hdr.qfi = + gtp_psc_mask->qfi; + input_set_byte += 1; + t++; + } + gtpu_psc_valid = 1; break; case RTE_FLOW_ITEM_TYPE_VOID: @@ -945,19 +1378,153 @@ ice_switch_inset_get(const struct rte_flow_item pattern[], rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, pattern, "Invalid pattern item."); - goto out; + return false; + } + } + + if (*tun_type == ICE_SW_TUN_PPPOE_PAY && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_PAY_QINQ; + else if (*tun_type == ICE_SW_TUN_PPPOE && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_QINQ; + else if (*tun_type == ICE_NON_TUN && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_NON_TUN_QINQ; + else if (*tun_type == ICE_SW_TUN_AND_NON_TUN && + inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ; + + if (pppoe_patt_valid && !pppoe_prot_valid) { + if (inner_vlan_valid && outer_vlan_valid && ipv4_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV4_QINQ; + else if (inner_vlan_valid && outer_vlan_valid && ipv6_valid) + *tun_type = ICE_SW_TUN_PPPOE_IPV6_QINQ; + else if (inner_vlan_valid && outer_vlan_valid) + *tun_type = ICE_SW_TUN_PPPOE_QINQ; + else if (ipv6_valid && udp_valid) + *tun_type = 
ICE_SW_TUN_PPPOE_IPV6_UDP;
+		else if (ipv6_valid && tcp_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6_TCP;
+		else if (ipv4_valid && udp_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_UDP;
+		else if (ipv4_valid && tcp_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4_TCP;
+		else if (ipv6_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV6;
+		else if (ipv4_valid)
+			*tun_type = ICE_SW_TUN_PPPOE_IPV4;
+		else
+			*tun_type = ICE_SW_TUN_PPPOE;
+	}
+
+	if (gtpu_valid && gtpu_psc_valid) {
+		if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_UDP;
+		else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4_TCP;
+		else if (ipv4_valid && inner_ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV4;
+		else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_UDP;
+		else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6_TCP;
+		else if (ipv4_valid && inner_ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_EH_IPV6;
+		else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_UDP;
+		else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4_TCP;
+		else if (ipv6_valid && inner_ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV4;
+		else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_UDP;
+		else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6_TCP;
+		else if (ipv6_valid && inner_ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_EH_IPV6;
+		else if (ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
+		else if (ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
+	} else if (gtpu_valid) {
+		if (ipv4_valid && inner_ipv4_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_UDP;
+		else if (ipv4_valid && inner_ipv4_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4_TCP;
+		else if (ipv4_valid && inner_ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV4;
+		else if (ipv4_valid && inner_ipv6_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_UDP;
+		else if (ipv4_valid && inner_ipv6_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6_TCP;
+		else if (ipv4_valid && inner_ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_IPV6;
+		else if (ipv6_valid && inner_ipv4_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_UDP;
+		else if (ipv6_valid && inner_ipv4_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4_TCP;
+		else if (ipv6_valid && inner_ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV4;
+		else if (ipv6_valid && inner_ipv6_valid && inner_udp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_UDP;
+		else if (ipv6_valid && inner_ipv6_valid && inner_tcp_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6_TCP;
+		else if (ipv6_valid && inner_ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_IPV6;
+		else if (ipv4_valid)
+			*tun_type = ICE_SW_TUN_IPV4_GTPU_NO_PAY;
+		else if (ipv6_valid)
+			*tun_type = ICE_SW_TUN_IPV6_GTPU_NO_PAY;
+	}
+
+	if (*tun_type == ICE_SW_TUN_IPV4_GTPU_NO_PAY ||
+	    *tun_type == ICE_SW_TUN_IPV6_GTPU_NO_PAY) {
+		for (k = 0; k < t; k++) {
+			if (list[k].type == ICE_GTP)
+				list[k].type = ICE_GTP_NO_PAY;
 		}
 	}
 
+	if (*tun_type == ICE_NON_TUN) {
+		if (vxlan_valid)
+			*tun_type = ICE_SW_TUN_VXLAN;
+		else if (nvgre_valid)
+			*tun_type = ICE_SW_TUN_NVGRE;
+		else if (ipv4_valid && tcp_valid)
+			*tun_type = ICE_SW_IPV4_TCP;
+		else if (ipv4_valid && udp_valid)
+			*tun_type = ICE_SW_IPV4_UDP;
+		else if (ipv6_valid && tcp_valid)
+			*tun_type = ICE_SW_IPV6_TCP;
+		else if (ipv6_valid && udp_valid)
+			*tun_type = ICE_SW_IPV6_UDP;
+	}
+
+	if (input_set_byte > MAX_INPUT_SET_BYTE) {
+		rte_flow_error_set(error, EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"too much input set");
+		return false;
+	}
+
 	*lkups_num = t;
 
-	return input_set;
-out:
-	return 0;
+inset_check:
+	if ((!outer_input_set && !inner_input_set &&
+	    !ice_is_prof_rule(*tun_type)) || (outer_input_set &
+	    ~pattern_match_item->input_set_mask_o) ||
+	    (inner_input_set & ~pattern_match_item->input_set_mask_i))
+		return false;
+
+	return true;
 }
 
 static int
-ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
+ice_switch_parse_dcf_action(struct ice_dcf_adapter *ad,
+			const struct rte_flow_action *actions,
+			uint32_t priority,
 			struct rte_flow_error *error,
 			struct ice_adv_rule_info *rule_info)
 {
@@ -972,20 +1539,40 @@ ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
 		case RTE_FLOW_ACTION_TYPE_VF:
 			rule_info->sw_act.fltr_act = ICE_FWD_TO_VSI;
 			act_vf = action->conf;
-			rule_info->sw_act.vsi_handle = act_vf->id;
+
+			if (act_vf->id >= ad->real_hw.num_vfs &&
+				!act_vf->original) {
+				rte_flow_error_set(error,
+					EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+					actions,
+					"Invalid vf id");
+				return -rte_errno;
+			}
+
+			if (act_vf->original)
+				rule_info->sw_act.vsi_handle =
+					ad->real_hw.avf.bus.func;
+			else
+				rule_info->sw_act.vsi_handle = act_vf->id;
+			break;
+
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
 			break;
+
 		default:
 			rte_flow_error_set(error,
 				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
 				actions,
-				"Invalid action type or queue number");
+				"Invalid action type");
 			return -rte_errno;
 		}
 	}
 
 	rule_info->sw_act.src = rule_info->sw_act.vsi_handle;
+	rule_info->sw_act.flag = ICE_FLTR_RX;
 	rule_info->rx = 1;
-	rule_info->priority = 5;
+	rule_info->priority = priority + 5;
 
 	return 0;
 }
@@ -993,11 +1580,12 @@ ice_switch_parse_dcf_action(const struct rte_flow_action *actions,
 static int
 ice_switch_parse_action(struct ice_pf *pf,
 		const struct rte_flow_action *actions,
+		uint32_t priority,
 		struct rte_flow_error *error,
 		struct ice_adv_rule_info *rule_info)
 {
 	struct ice_vsi *vsi = pf->main_vsi;
-	struct rte_eth_dev *dev = pf->adapter->eth_dev;
+	struct rte_eth_dev_data *dev_data = pf->adapter->pf.dev_data;
 	const struct rte_flow_action_queue *act_q;
 	const struct rte_flow_action_rss *act_qgrop;
 	uint16_t base_queue, i;
@@ -1013,6 +1601,8 @@ ice_switch_parse_action(struct ice_pf *pf,
 		switch (action_type) {
 		case RTE_FLOW_ACTION_TYPE_RSS:
 			act_qgrop = action->conf;
+			if (act_qgrop->queue_num <= 1)
+				goto error;
 			rule_info->sw_act.fltr_act =
 				ICE_FWD_TO_QGRP;
 			rule_info->sw_act.fwd_id.q_id =
@@ -1026,18 +1616,18 @@ ice_switch_parse_action(struct ice_pf *pf,
 				goto error;
 			if ((act_qgrop->queue[0] +
 				act_qgrop->queue_num) >
-				dev->data->nb_rx_queues)
-				goto error;
+				dev_data->nb_rx_queues)
+				goto error1;
 			for (i = 0; i < act_qgrop->queue_num - 1; i++)
 				if (act_qgrop->queue[i + 1] !=
 					act_qgrop->queue[i] + 1)
-					goto error;
+					goto error2;
 			rule_info->sw_act.qgrp_size =
 				act_qgrop->queue_num;
 			break;
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			act_q = action->conf;
-			if (act_q->index >= dev->data->nb_rx_queues)
+			if (act_q->index >= dev_data->nb_rx_queues)
 				goto error;
 			rule_info->sw_act.fltr_act =
 				ICE_FWD_TO_Q;
@@ -1061,7 +1651,7 @@
 	rule_info->sw_act.vsi_handle = vsi->idx;
 	rule_info->rx = 1;
 	rule_info->sw_act.src = vsi->idx;
-	rule_info->priority = 5;
+	rule_info->priority = priority + 5;
 
 	return 0;
 
@@ -1071,6 +1661,60 @@ error:
 		actions,
 		"Invalid action type or queue number");
 	return -rte_errno;
+
+error1:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Invalid queue region indexes");
+	return -rte_errno;
+
+error2:
+	rte_flow_error_set(error,
+		EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+		actions,
+		"Discontinuous queue region");
+	return -rte_errno;
+}
+
+static int
+ice_switch_check_action(const struct rte_flow_action *actions,
+			struct rte_flow_error *error)
+{
+	const struct rte_flow_action *action;
+	enum rte_flow_action_type action_type;
+	uint16_t actions_num = 0;
+
+	for (action = actions; action->type !=
+		RTE_FLOW_ACTION_TYPE_END; action++) {
+		action_type = action->type;
+		switch (action_type) {
+		case RTE_FLOW_ACTION_TYPE_VF:
+		case RTE_FLOW_ACTION_TYPE_RSS:
+		case RTE_FLOW_ACTION_TYPE_QUEUE:
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			actions_num++;
+			break;
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			continue;
+		default:
+			rte_flow_error_set(error,
+				EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+				actions,
+				"Invalid action type");
+			return -rte_errno;
+		}
+	}
+
+	if (actions_num != 1) {
+		rte_flow_error_set(error,
+			EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
+			actions,
+			"Invalid action number");
+		return -rte_errno;
+	}
+
+	return 0;
 }
 
 static int
@@ -1079,11 +1723,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		uint32_t array_len,
 		const struct rte_flow_item pattern[],
 		const struct rte_flow_action actions[],
+		uint32_t priority,
 		void **meta,
 		struct rte_flow_error *error)
 {
 	struct ice_pf *pf = &ad->pf;
-	uint64_t inputset = 0;
 	int ret = 0;
 	struct sw_meta *sw_meta_ptr = NULL;
 	struct ice_adv_rule_info rule_info;
@@ -1091,19 +1735,13 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	uint16_t lkups_num = 0;
 	const struct rte_flow_item *item = pattern;
 	uint16_t item_num = 0;
+	uint16_t vlan_num = 0;
 	enum ice_sw_tunnel_type tun_type =
-		ICE_SW_TUN_AND_NON_TUN;
+		ICE_NON_TUN;
 	struct ice_pattern_match_item *pattern_match_item = NULL;
 
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		item_num++;
-		if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN)
-			tun_type = ICE_SW_TUN_VXLAN;
-		if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE)
-			tun_type = ICE_SW_TUN_NVGRE;
-		if (item->type == RTE_FLOW_ITEM_TYPE_PPPOED ||
-			item->type == RTE_FLOW_ITEM_TYPE_PPPOES)
-			tun_type = ICE_SW_TUN_PPPOE;
 		if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
 			const struct rte_flow_item_eth *eth_mask;
 			if (item->mask)
@@ -1113,6 +1751,10 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			if (eth_mask->type == UINT16_MAX)
 				tun_type = ICE_SW_TUN_AND_NON_TUN;
 		}
+
+		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN)
+			vlan_num++;
+
 		/* reserve one more memory slot for ETH which may
 		 * consume 2 lookup items.
 		 */
@@ -1120,6 +1762,11 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 			item_num++;
 	}
 
+	if (vlan_num == 2 && tun_type == ICE_SW_TUN_AND_NON_TUN)
+		tun_type = ICE_SW_TUN_AND_NON_TUN_QINQ;
+	else if (vlan_num == 2)
+		tun_type = ICE_NON_TUN_QINQ;
+
 	list = rte_zmalloc(NULL, item_num * sizeof(*list), 0);
 	if (!list) {
 		rte_flow_error_set(error, EINVAL,
@@ -1128,8 +1775,6 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		return -rte_errno;
 	}
 
-	rule_info.tun_type = tun_type;
-
 	sw_meta_ptr =
 		rte_zmalloc(NULL, sizeof(*sw_meta_ptr), 0);
 	if (!sw_meta_ptr) {
@@ -1140,7 +1785,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 	}
 
 	pattern_match_item =
-		ice_search_pattern_match_item(pattern, array, array_len, error);
+		ice_search_pattern_match_item(ad, pattern, array, array_len,
+			error);
 	if (!pattern_match_item) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
@@ -1148,9 +1794,8 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		goto error;
 	}
 
-	inputset = ice_switch_inset_get
-		(pattern, error, list, &lkups_num, tun_type);
-	if (!inputset || (inputset & ~pattern_match_item->input_set_mask)) {
+	if (!ice_switch_parse_pattern(pattern, error, list, &lkups_num,
+		&tun_type, pattern_match_item)) {
 		rte_flow_error_set(error, EINVAL,
 				RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
 				pattern,
@@ -1158,17 +1803,22 @@ ice_switch_parse_pattern_action(struct ice_adapter *ad,
 		goto error;
 	}
 
+	memset(&rule_info, 0, sizeof(rule_info));
+	rule_info.tun_type = tun_type;
+
+	ret = ice_switch_check_action(actions, error);
+	if (ret)
+		goto error;
+
 	if (ad->hw.dcf_enabled)
-		ret = ice_switch_parse_dcf_action(actions, error, &rule_info);
+		ret = ice_switch_parse_dcf_action((void *)ad, actions, priority,
+			error, &rule_info);
 	else
-		ret = ice_switch_parse_action(pf, actions, error, &rule_info);
+		ret = ice_switch_parse_action(pf, actions, priority, error,
+			&rule_info);
 
-	if (ret) {
-		rte_flow_error_set(error, EINVAL,
-			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
-			"Invalid input action");
+	if (ret)
 		goto error;
-	}
 
 	if (meta) {
 		*meta = sw_meta_ptr;
@@ -1206,24 +1856,102 @@ ice_switch_query(struct ice_adapter *ad __rte_unused,
 	return -rte_errno;
 }
 
+static int
+ice_switch_redirect(struct ice_adapter *ad,
+		    struct rte_flow *flow,
+		    struct ice_flow_redirect *rd)
+{
+	struct ice_rule_query_data *rdata = flow->rule;
+	struct ice_adv_fltr_mgmt_list_entry *list_itr;
+	struct ice_adv_lkup_elem *lkups_dp = NULL;
+	struct LIST_HEAD_TYPE *list_head;
+	struct ice_adv_rule_info rinfo;
+	struct ice_hw *hw = &ad->hw;
+	struct ice_switch_info *sw;
+	uint16_t lkups_cnt;
+	int ret;
+
+	if (rdata->vsi_handle != rd->vsi_handle)
+		return 0;
+
+	sw = hw->switch_info;
+	if (!sw->recp_list[rdata->rid].recp_created)
+		return -EINVAL;
+
+	if (rd->type != ICE_FLOW_REDIRECT_VSI)
+		return -ENOTSUP;
+
+	list_head = &sw->recp_list[rdata->rid].filt_rules;
+	LIST_FOR_EACH_ENTRY(list_itr, list_head, ice_adv_fltr_mgmt_list_entry,
+			    list_entry) {
+		rinfo = list_itr->rule_info;
+		if ((rinfo.fltr_rule_id == rdata->rule_id &&
+		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI &&
+		    rinfo.sw_act.vsi_handle == rd->vsi_handle) ||
+		    (rinfo.fltr_rule_id == rdata->rule_id &&
+		    rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST)){
+			lkups_cnt = list_itr->lkups_cnt;
+			lkups_dp = (struct ice_adv_lkup_elem *)
+				ice_memdup(hw, list_itr->lkups,
+					   sizeof(*list_itr->lkups) *
+					   lkups_cnt, ICE_NONDMA_TO_NONDMA);
+
+			if (!lkups_dp) {
+				PMD_DRV_LOG(ERR, "Failed to allocate memory.");
+				return -EINVAL;
+			}
+
+			if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
+				rinfo.sw_act.vsi_handle = rd->vsi_handle;
+				rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
+			}
+			break;
+		}
+	}
+
+	if (!lkups_dp)
+		return -EINVAL;
+
+	/* Remove the old rule */
+	ret = ice_rem_adv_rule(hw, list_itr->lkups,
+			       lkups_cnt, &rinfo);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to delete the old rule %d",
+			    rdata->rule_id);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Update VSI context */
+	hw->vsi_ctx[rd->vsi_handle]->vsi_num = rd->new_vsi_num;
+
+	/* Replay the rule */
+	ret = ice_add_adv_rule(hw, lkups_dp, lkups_cnt,
+			       &rinfo, rdata);
+	if (ret) {
+		PMD_DRV_LOG(ERR, "Failed to replay the rule");
+		ret = -EINVAL;
+	}
+
+out:
+	ice_free(hw, lkups_dp);
+	return ret;
+}
+
 static int
 ice_switch_init(struct ice_adapter *ad)
 {
 	int ret = 0;
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
+	struct ice_flow_parser *perm_parser;
 
-	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
-		dist_parser = &ice_switch_dist_parser_comms;
-	else if (ad->active_pkg_type == ICE_PKG_TYPE_OS_DEFAULT)
-		dist_parser = &ice_switch_dist_parser_os;
-	else
-		return -EINVAL;
-
-	if (ad->devargs.pipe_mode_support)
+	if (ad->devargs.pipe_mode_support) {
+		perm_parser = &ice_switch_perm_parser;
 		ret = ice_register_parser(perm_parser, ad);
-	else
+	} else {
+		dist_parser = &ice_switch_dist_parser;
 		ret = ice_register_parser(dist_parser, ad);
+	}
 
 	return ret;
 }
@@ -1231,17 +1959,15 @@
 static void
 ice_switch_uninit(struct ice_adapter *ad)
 {
 	struct ice_flow_parser *dist_parser;
-	struct ice_flow_parser *perm_parser = &ice_switch_perm_parser;
+	struct ice_flow_parser *perm_parser;
 
-	if (ad->active_pkg_type == ICE_PKG_TYPE_COMMS)
-		dist_parser = &ice_switch_dist_parser_comms;
-	else
-		dist_parser = &ice_switch_dist_parser_os;
-
-	if (ad->devargs.pipe_mode_support)
+	if (ad->devargs.pipe_mode_support) {
+		perm_parser = &ice_switch_perm_parser;
 		ice_unregister_parser(perm_parser, ad);
-	else
+	} else {
+		dist_parser = &ice_switch_dist_parser;
 		ice_unregister_parser(dist_parser, ad);
+	}
 }
 
 static struct
@@ -1251,24 +1977,16 @@ ice_flow_engine ice_switch_engine = {
 	.create = ice_switch_create,
 	.destroy = ice_switch_destroy,
 	.query_count = ice_switch_query,
+	.redirect = ice_switch_redirect,
 	.free = ice_switch_filter_rule_free,
 	.type = ICE_FLOW_ENGINE_SWITCH,
 };
 
 static struct
-ice_flow_parser ice_switch_dist_parser_os = {
-	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_dist_os,
-	.array_len = RTE_DIM(ice_switch_pattern_dist_os),
-	.parse_pattern_action = ice_switch_parse_pattern_action,
-	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
-};
-
-static struct
-ice_flow_parser ice_switch_dist_parser_comms = {
+ice_flow_parser ice_switch_dist_parser = {
 	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_dist_comms,
-	.array_len = RTE_DIM(ice_switch_pattern_dist_comms),
+	.array = ice_switch_pattern_dist_list,
+	.array_len = RTE_DIM(ice_switch_pattern_dist_list),
 	.parse_pattern_action = ice_switch_parse_pattern_action,
 	.stage = ICE_FLOW_STAGE_DISTRIBUTOR,
 };
@@ -1276,8 +1994,8 @@ ice_flow_parser ice_switch_dist_parser_comms = {
 static struct
 ice_flow_parser ice_switch_perm_parser = {
 	.engine = &ice_switch_engine,
-	.array = ice_switch_pattern_perm,
-	.array_len = RTE_DIM(ice_switch_pattern_perm),
+	.array = ice_switch_pattern_perm_list,
+	.array_len = RTE_DIM(ice_switch_pattern_perm_list),
 	.parse_pattern_action = ice_switch_parse_pattern_action,
 	.stage = ICE_FLOW_STAGE_PERMISSION,
 };