+copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
+ const void *val, const void *mask, uint8_t val_size,
+ uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
+{
+ uint8_t *l5_mask, *l5_val;
+ uint8_t start_off;
+
+ /* Reject the item if it would not fit in the L5 pattern buffer. */
+ start_off = *inner_ofst;
+ if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
+ return ENOTSUP;
+ l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
+ l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
+ /* Copy the pattern into the L5 buffer. */
+ if (val) {
+     memcpy(l5_mask + start_off, mask, val_size);
+     memcpy(l5_val + start_off, val, val_size);
+ }
+ /* Set the protocol field in the previous header. */
+ if (proto_off) {
+     void *m, *v;
+
+     m = l5_mask + proto_off;
+     v = l5_val + proto_off;
+     if (proto_size == 1) {
+         *(uint8_t *)m = 0xff;
+         *(uint8_t *)v = (uint8_t)proto_val;
+     } else if (proto_size == 2) {
+         *(uint16_t *)m = 0xffff;
+         *(uint16_t *)v = proto_val;
+     }
+ }
+ /*
+  * All inner headers take space in the L5 buffer even if their spec is
+  * null, so always advance the inner offset.
+  */
+ *inner_ofst += val_size;
+ return 0;
+}
+
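+/*
+ * Copy an inner (encapsulated) ethernet item into the L5 pattern buffer
+ * and record the offset of its ether type so a following VLAN or L3
+ * item can patch it.
+ */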
+static int
+enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_eth_mask;
+ arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_ether_hdr),
+ 0 /* no previous protocol */, 0, 0);
+}
+
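+/*
+ * Copy an inner VLAN item into the L5 pattern buffer. The preceding
+ * header's ether type is set to the 802.1Q TPID, and l2_proto_off is
+ * moved to the VLAN header's eth_proto field for the next L3 item.
+ */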
+static int
+enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+ uint8_t eth_type_off;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_vlan_mask;
+ /* Append vlan header to L5 and set ether type = TPID */
+ eth_type_off = arg->l2_proto_off;
+ arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
+ eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
+}
+
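+/*
+ * Copy an inner IPv4 item into the L5 pattern buffer, set the preceding
+ * ether type to IPv4, and record the next_proto_id offset for a
+ * following L4 item.
+ */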
+static int
+enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv4_mask;
+ /* Append ipv4 header to L5 and set ether type = ipv4 */
+ arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2);
+}
+
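+/*
+ * Copy an inner IPv6 item into the L5 pattern buffer, set the preceding
+ * ether type to IPv6, and record the proto field offset for a following
+ * L4 item.
+ */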
+static int
+enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_ipv6_mask;
+ /* Append ipv6 header to L5 and set ether type = ipv6 */
+ arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
+ arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2);
+}
+
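+/*
+ * Copy an inner UDP item into the L5 pattern buffer and set the
+ * preceding IP protocol to UDP.
+ */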
+static int
+enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_udp_mask;
+ /* Append udp header to L5 and set ip proto = udp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_udp_hdr),
+ arg->l3_proto_off, IPPROTO_UDP, 1);
+}
+
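+/*
+ * Copy an inner TCP item into the L5 pattern buffer and set the
+ * preceding IP protocol to TCP.
+ */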
+static int
+enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
+{
+ const void *mask = arg->item->mask;
+ uint8_t *off = arg->inner_ofst;
+
+ ENICPMD_FUNC_TRACE();
+ if (!mask)
+ mask = &rte_flow_item_tcp_mask;
+ /* Append tcp header to L5 and set ip proto = tcp */
+ return copy_inner_common(&arg->filter->u.generic_1, off,
+ arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
+ arg->l3_proto_off, IPPROTO_TCP, 1);
+}
+
+static int
+enic_copy_item_eth_v2(struct copy_item_args *arg)