X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_flow.c;h=d188e582902be5798d0a24fccd7054cbf2d4f791;hb=e73e3547ce54d7ae48dff82d87efac0b7a30692a;hp=32ebeff09f1c82c9072c31b24f3b18d019c3f04e;hpb=37b07be28a3fcdde2724cc467bdb3c82bf35deef;p=dpdk.git

diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 32ebeff09f..d188e58290 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -39,6 +39,7 @@ struct copy_item_args {
 	uint8_t *inner_ofst;
 	uint8_t l2_proto_off;
 	uint8_t l3_proto_off;
+	struct enic *enic;
 };
 
 /* functions for copying items into enic filters */
@@ -410,7 +411,7 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg)
 	const struct rte_flow_item_ipv4 *spec = item->spec;
 	const struct rte_flow_item_ipv4 *mask = item->mask;
 	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
-	struct ipv4_hdr supported_mask = {
+	struct rte_ipv4_hdr supported_mask = {
 		.src_addr = 0xffffffff,
 		.dst_addr = 0xffffffff,
 	};
@@ -448,7 +449,7 @@ enic_copy_item_udp_v1(struct copy_item_args *arg)
 	const struct rte_flow_item_udp *spec = item->spec;
 	const struct rte_flow_item_udp *mask = item->mask;
 	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
-	struct udp_hdr supported_mask = {
+	struct rte_udp_hdr supported_mask = {
 		.src_port = 0xffff,
 		.dst_port = 0xffff,
 	};
@@ -487,7 +488,7 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg)
 	const struct rte_flow_item_tcp *spec = item->spec;
 	const struct rte_flow_item_tcp *mask = item->mask;
 	struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
-	struct tcp_hdr supported_mask = {
+	struct rte_tcp_hdr supported_mask = {
 		.src_port = 0xffff,
 		.dst_port = 0xffff,
 	};
@@ -571,9 +572,9 @@ enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
 	FLOW_TRACE();
 	if (!mask)
 		mask = &rte_flow_item_eth_mask;
-	arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
+	arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type);
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct ether_hdr),
+		arg->item->spec, mask, sizeof(struct rte_ether_hdr),
 		0 /* no previous protocol */, 0, 0);
 }
 
@@ -589,10 +590,10 @@ enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_vlan_mask;
 	/* Append vlan header to L5 and set ether type = TPID */
 	eth_type_off = arg->l2_proto_off;
-	arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
+	arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto);
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct vlan_hdr),
-		eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
+		arg->item->spec, mask, sizeof(struct rte_vlan_hdr),
+		eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2);
 }
 
 static int
@@ -605,10 +606,10 @@ enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
 	if (!mask)
 		mask = &rte_flow_item_ipv4_mask;
 	/* Append ipv4 header to L5 and set ether type = ipv4 */
-	arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
+	arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id);
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct ipv4_hdr),
-		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
+		arg->item->spec, mask, sizeof(struct rte_ipv4_hdr),
+		arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv4), 2);
 }
 
 static int
@@ -621,10 +622,10 @@ enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
 	if (!mask)
 		mask = &rte_flow_item_ipv6_mask;
 	/* Append ipv6 header to L5 and set ether type = ipv6 */
-	arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
+	arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto);
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct ipv6_hdr),
-		arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
+		arg->item->spec, mask, sizeof(struct rte_ipv6_hdr),
+		arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPv6), 2);
 }
 
 static int
@@ -638,7 +639,7 @@ enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_udp_mask;
 	/* Append udp header to L5 and set ip proto = udp */
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct udp_hdr),
+		arg->item->spec, mask, sizeof(struct rte_udp_hdr),
 		arg->l3_proto_off, IPPROTO_UDP, 1);
 }
 
@@ -653,7 +654,7 @@ enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_tcp_mask;
 	/* Append tcp header to L5 and set ip proto = tcp */
 	return copy_inner_common(&arg->filter->u.generic_1, off,
-		arg->item->spec, mask, sizeof(struct tcp_hdr),
+		arg->item->spec, mask, sizeof(struct rte_tcp_hdr),
 		arg->l3_proto_off, IPPROTO_TCP, 1);
 }
 
@@ -662,8 +663,8 @@ enic_copy_item_eth_v2(struct copy_item_args *arg)
 {
 	const struct rte_flow_item *item = arg->item;
 	struct filter_v2 *enic_filter = arg->filter;
-	struct ether_hdr enic_spec;
-	struct ether_hdr enic_mask;
+	struct rte_ether_hdr enic_spec;
+	struct rte_ether_hdr enic_mask;
 	const struct rte_flow_item_eth *spec = item->spec;
 	const struct rte_flow_item_eth *mask = item->mask;
 	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
@@ -678,22 +679,22 @@ enic_copy_item_eth_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_eth_mask;
 
 	memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
-	       ETHER_ADDR_LEN);
+	       RTE_ETHER_ADDR_LEN);
 	memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
-	       ETHER_ADDR_LEN);
+	       RTE_ETHER_ADDR_LEN);
 
 	memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
-	       ETHER_ADDR_LEN);
+	       RTE_ETHER_ADDR_LEN);
 	memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
-	       ETHER_ADDR_LEN);
+	       RTE_ETHER_ADDR_LEN);
 	enic_spec.ether_type = spec->type;
 	enic_mask.ether_type = mask->type;
 
 	/* outer header */
 	memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
-	       sizeof(struct ether_hdr));
+	       sizeof(struct rte_ether_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
-	       sizeof(struct ether_hdr));
+	       sizeof(struct rte_ether_hdr));
 	return 0;
 }
 
@@ -705,8 +706,8 @@ enic_copy_item_vlan_v2(struct copy_item_args *arg)
 	const struct rte_flow_item_vlan *spec = item->spec;
 	const struct rte_flow_item_vlan *mask = item->mask;
 	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
-	struct ether_hdr *eth_mask;
-	struct ether_hdr *eth_val;
+	struct rte_ether_hdr *eth_mask;
+	struct rte_ether_hdr *eth_val;
 
 	FLOW_TRACE();
 
@@ -723,12 +724,26 @@ enic_copy_item_vlan_v2(struct copy_item_args *arg)
 	if (eth_mask->ether_type)
 		return ENOTSUP;
 	/*
+	 * For recent models:
 	 * When packet matching, the VIC always compares vlan-stripped
 	 * L2, regardless of vlan stripping settings. So, the inner type
 	 * from vlan becomes the ether type of the eth header.
+	 *
+	 * Older models w/o hardware vxlan parser have a different
+	 * behavior when vlan stripping is disabled. In this case,
+	 * vlan tag remains in the L2 buffer.
 	 */
-	eth_mask->ether_type = mask->inner_type;
-	eth_val->ether_type = spec->inner_type;
+	if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
+		struct rte_vlan_hdr *vlan;
+
+		vlan = (struct rte_vlan_hdr *)(eth_mask + 1);
+		vlan->eth_proto = mask->inner_type;
+		vlan = (struct rte_vlan_hdr *)(eth_val + 1);
+		vlan->eth_proto = spec->inner_type;
+	} else {
+		eth_mask->ether_type = mask->inner_type;
+		eth_val->ether_type = spec->inner_type;
+	}
 	/* For TCI, use the vlan mask/val fields (little endian). */
 	gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
 	gp->val_vlan = rte_be_to_cpu_16(spec->tci);
@@ -758,9 +773,9 @@ enic_copy_item_ipv4_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_ipv4_mask;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
-	       sizeof(struct ipv4_hdr));
+	       sizeof(struct rte_ipv4_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
-	       sizeof(struct ipv4_hdr));
+	       sizeof(struct rte_ipv4_hdr));
 	return 0;
 }
 
@@ -787,9 +802,9 @@ enic_copy_item_ipv6_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_ipv6_mask;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
-	       sizeof(struct ipv6_hdr));
+	       sizeof(struct rte_ipv6_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
-	       sizeof(struct ipv6_hdr));
+	       sizeof(struct rte_ipv6_hdr));
 	return 0;
 }
 
@@ -816,9 +831,9 @@ enic_copy_item_udp_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_udp_mask;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
-	       sizeof(struct udp_hdr));
+	       sizeof(struct rte_udp_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
-	       sizeof(struct udp_hdr));
+	       sizeof(struct rte_udp_hdr));
 	return 0;
 }
 
@@ -845,9 +860,9 @@ enic_copy_item_tcp_v2(struct copy_item_args *arg)
 		return ENOTSUP;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
-	       sizeof(struct tcp_hdr));
+	       sizeof(struct rte_tcp_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
-	       sizeof(struct tcp_hdr));
+	       sizeof(struct rte_tcp_hdr));
 	return 0;
 }
 
@@ -869,16 +884,16 @@ enic_copy_item_sctp_v2(struct copy_item_args *arg)
 	 * the protocol number in the IP pattern.
 	 */
 	if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
-		struct ipv4_hdr *ip;
-		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+		struct rte_ipv4_hdr *ip;
+		ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
 		ip_proto_mask = &ip->next_proto_id;
-		ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+		ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
 		ip_proto = &ip->next_proto_id;
 	} else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
-		struct ipv6_hdr *ip;
-		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
+		struct rte_ipv6_hdr *ip;
+		ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
 		ip_proto_mask = &ip->proto;
-		ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
+		ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
 		ip_proto = &ip->proto;
 	} else {
 		/* Need IPv4/IPv6 pattern first */
@@ -895,9 +910,9 @@ enic_copy_item_sctp_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_sctp_mask;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
-	       sizeof(struct sctp_hdr));
+	       sizeof(struct rte_sctp_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
-	       sizeof(struct sctp_hdr));
+	       sizeof(struct rte_sctp_hdr));
 	return 0;
 }
 
@@ -910,7 +925,7 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
 	const struct rte_flow_item_vxlan *spec = item->spec;
 	const struct rte_flow_item_vxlan *mask = item->mask;
 	struct filter_generic_1 *gp = &enic_filter->u.generic_1;
-	struct udp_hdr *udp;
+	struct rte_udp_hdr *udp;
 
 	FLOW_TRACE();
 
@@ -920,9 +935,9 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
 	 */
 	gp->mask_flags |= FILTER_GENERIC_1_UDP;
 	gp->val_flags |= FILTER_GENERIC_1_UDP;
-	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
+	udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
 	udp->dst_port = 0xffff;
-	udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
+	udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
 	udp->dst_port = RTE_BE16(4789);
 	/* Match all if no spec */
 	if (!spec)
@@ -932,11 +947,11 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg)
 		mask = &rte_flow_item_vxlan_mask;
 
 	memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
-	       sizeof(struct vxlan_hdr));
+	       sizeof(struct rte_vxlan_hdr));
 	memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
-	       sizeof(struct vxlan_hdr));
+	       sizeof(struct rte_vxlan_hdr));
 
-	*inner_ofst = sizeof(struct vxlan_hdr);
+	*inner_ofst = sizeof(struct rte_vxlan_hdr);
 	return 0;
 }
 
@@ -968,7 +983,7 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)
 		return EINVAL;
 	/* Need non-null pattern that fits within the NIC's filter pattern */
 	if (spec->length == 0 ||
-	    spec->length + sizeof(struct udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
+	    spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN ||
 	    !spec->pattern || !mask->pattern)
 		return EINVAL;
 	/*
@@ -981,9 +996,9 @@ enic_copy_item_raw_v2(struct copy_item_args *arg)
 	 */
 	if (mask->length != 0 && mask->length < spec->length)
 		return EINVAL;
-	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
 	       mask->pattern, spec->length);
-	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
 	       spec->pattern, spec->length);
 
 	return 0;
@@ -1036,10 +1051,10 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
 	if (!(inner_ofst > 0 && enic->vxlan))
 		return;
 	FLOW_TRACE();
-	vxlan = sizeof(struct vxlan_hdr);
-	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
+	vxlan = sizeof(struct rte_vxlan_hdr);
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr),
 	       gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
-	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
+	memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr),
 	       gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
 	inner = inner_ofst - vxlan;
 	memset(layer, 0, sizeof(layer));
@@ -1083,6 +1098,7 @@ enic_copy_filter(const struct rte_flow_item pattern[],
 
 	args.filter = enic_filter;
 	args.inner_ofst = &inner_ofst;
+	args.enic = enic;
 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
 		/* Get info about how to validate and copy the item. If NULL
 		 * is returned the nic does not support the item.