X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_flow.c;h=cebca7d55a99655b9198126f870757130505bc67;hb=cfa9e98d3c135e4f937265263feb7377330219f6;hp=b3172e7be5d1682b4d051de5a48d4ad5beb960ef;hpb=593f17668f5ba3445bdedaa0edc5d308ca43e061;p=dpdk.git diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c index b3172e7be5..cebca7d55a 100644 --- a/drivers/net/enic/enic_flow.c +++ b/drivers/net/enic/enic_flow.c @@ -16,13 +16,6 @@ #include "vnic_dev.h" #include "vnic_nic.h" -#define FLOW_TRACE() \ - rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \ - "%s()\n", __func__) -#define FLOW_LOG(level, fmt, args...) \ - rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \ - fmt "\n", ##args) - /* * Common arguments passed to copy_item functions. Use this structure * so we can easily add new arguments. @@ -30,11 +23,16 @@ * filter: Partially filled in NIC filter structure. * inner_ofst: If zero, this is an outer header. If non-zero, this is * the offset into L5 where the header begins. + * l2_proto_off: offset to EtherType eth or vlan header. + * l3_proto_off: offset to next protocol field in IPv4 or 6 header. */ struct copy_item_args { const struct rte_flow_item *item; struct filter_v2 *filter; uint8_t *inner_ofst; + uint8_t l2_proto_off; + uint8_t l3_proto_off; + struct enic *enic; }; /* functions for copying items into enic filters */ @@ -49,7 +47,9 @@ struct enic_items { /** True if it's OK for this item to be the first item. For some NIC * versions, it's invalid to start the stack above layer 3. */ - const u8 valid_start_item; + const uint8_t valid_start_item; + /* Inner packet version of copy_item. */ + enic_copy_item_fn *inner_copy_item; }; /** Filtering capabilities for various NIC and firmware versions. */ @@ -86,6 +86,12 @@ static enic_copy_item_fn enic_copy_item_udp_v2; static enic_copy_item_fn enic_copy_item_tcp_v2; static enic_copy_item_fn enic_copy_item_sctp_v2; static enic_copy_item_fn enic_copy_item_vxlan_v2; +static enic_copy_item_fn enic_copy_item_inner_eth_v2; +static enic_copy_item_fn enic_copy_item_inner_vlan_v2; +static enic_copy_item_fn enic_copy_item_inner_ipv4_v2; +static enic_copy_item_fn enic_copy_item_inner_ipv6_v2; +static enic_copy_item_fn enic_copy_item_inner_udp_v2; +static enic_copy_item_fn enic_copy_item_inner_tcp_v2; static copy_action_fn enic_copy_action_v1; static copy_action_fn enic_copy_action_v2; @@ -100,6 +106,7 @@ static const struct enic_items enic_items_v1[] = { .prev_items = (const enum rte_flow_item_type[]) { RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_UDP] = { .copy_item = enic_copy_item_udp_v1, @@ -108,6 +115,7 @@ static const struct enic_items enic_items_v1[] = { RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_TCP] = { .copy_item = enic_copy_item_tcp_v1, @@ -116,6 +124,7 @@ static const struct enic_items enic_items_v1[] = { RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, }; @@ -131,6 +140,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_ETH] = { .copy_item = enic_copy_item_eth_v2, @@ -139,6 +149,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_eth_v2, }, [RTE_FLOW_ITEM_TYPE_VLAN] = { .copy_item = enic_copy_item_vlan_v2, @@ -147,6 +158,7 @@ static const struct enic_items enic_items_v2[] = 
{ RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_vlan_v2, }, [RTE_FLOW_ITEM_TYPE_IPV4] = { .copy_item = enic_copy_item_ipv4_v2, @@ -156,6 +168,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_ipv4_v2, }, [RTE_FLOW_ITEM_TYPE_IPV6] = { .copy_item = enic_copy_item_ipv6_v2, @@ -165,6 +178,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_ipv6_v2, }, [RTE_FLOW_ITEM_TYPE_UDP] = { .copy_item = enic_copy_item_udp_v2, @@ -174,6 +188,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_udp_v2, }, [RTE_FLOW_ITEM_TYPE_TCP] = { .copy_item = enic_copy_item_tcp_v2, @@ -183,6 +198,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_tcp_v2, }, [RTE_FLOW_ITEM_TYPE_SCTP] = { .copy_item = enic_copy_item_sctp_v2, @@ -192,6 +208,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { .copy_item = enic_copy_item_vxlan_v2, @@ -200,6 +217,7 @@ static const struct enic_items enic_items_v2[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, }; @@ -212,6 +230,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_ETH] = { .copy_item = enic_copy_item_eth_v2, @@ -220,6 +239,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_VXLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_eth_v2, }, [RTE_FLOW_ITEM_TYPE_VLAN] = { .copy_item = enic_copy_item_vlan_v2, @@ -228,6 +248,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_vlan_v2, }, [RTE_FLOW_ITEM_TYPE_IPV4] = { .copy_item = enic_copy_item_ipv4_v2, @@ -237,6 +258,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_ipv4_v2, }, [RTE_FLOW_ITEM_TYPE_IPV6] = { .copy_item = enic_copy_item_ipv6_v2, @@ -246,6 +268,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_ipv6_v2, }, [RTE_FLOW_ITEM_TYPE_UDP] = { .copy_item = enic_copy_item_udp_v2, @@ -255,6 +278,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_udp_v2, }, [RTE_FLOW_ITEM_TYPE_TCP] = { .copy_item = enic_copy_item_tcp_v2, @@ -264,6 +288,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = enic_copy_item_inner_tcp_v2, }, [RTE_FLOW_ITEM_TYPE_SCTP] = { .copy_item = enic_copy_item_sctp_v2, @@ -273,6 +298,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_IPV6, RTE_FLOW_ITEM_TYPE_END, }, + .inner_copy_item = NULL, }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { .copy_item = enic_copy_item_vxlan_v2, @@ -281,6 +307,7 @@ static const struct enic_items enic_items_v3[] = { RTE_FLOW_ITEM_TYPE_UDP, RTE_FLOW_ITEM_TYPE_END, }, + 
.inner_copy_item = NULL, }, }; @@ -326,17 +353,6 @@ static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = { RTE_FLOW_ACTION_TYPE_END, }; -static const enum rte_flow_action_type enic_supported_actions_v2_count[] = { - RTE_FLOW_ACTION_TYPE_QUEUE, - RTE_FLOW_ACTION_TYPE_MARK, - RTE_FLOW_ACTION_TYPE_FLAG, - RTE_FLOW_ACTION_TYPE_DROP, - RTE_FLOW_ACTION_TYPE_COUNT, - RTE_FLOW_ACTION_TYPE_RSS, - RTE_FLOW_ACTION_TYPE_PASSTHRU, - RTE_FLOW_ACTION_TYPE_END, -}; - /** Action capabilities indexed by NIC version information */ static const struct enic_action_cap enic_action_cap[] = { [FILTER_ACTION_RQ_STEERING_FLAG] = { @@ -351,14 +367,10 @@ static const struct enic_action_cap enic_action_cap[] = { .actions = enic_supported_actions_v2_drop, .copy_fn = enic_copy_action_v2, }, - [FILTER_ACTION_COUNTER_FLAG] = { - .actions = enic_supported_actions_v2_count, - .copy_fn = enic_copy_action_v2, - }, }; static int -mask_exact_match(const u8 *supported, const u8 *supplied, +mask_exact_match(const uint8_t *supported, const uint8_t *supplied, unsigned int size) { unsigned int i; @@ -374,33 +386,29 @@ enic_copy_item_ipv4_v1(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; - struct ipv4_hdr supported_mask = { + struct rte_ipv4_hdr supported_mask = { .src_addr = 0xffffffff, .dst_addr = 0xffffffff, }; - FLOW_TRACE(); - - if (*inner_ofst) - return ENOTSUP; + ENICPMD_FUNC_TRACE(); if (!mask) mask = &rte_flow_item_ipv4_mask; /* This is an exact match filter, both fields must be set */ if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) { - FLOW_LOG(ERR, "IPv4 exact match src/dst addr"); + ENICPMD_LOG(ERR, "IPv4 exact match src/dst addr"); return ENOTSUP; } /* check that the suppied mask exactly matches capabilty */ - if (!mask_exact_match((const u8 *)&supported_mask, - (const u8 *)item->mask, sizeof(*mask))) { - FLOW_LOG(ERR, "IPv4 exact match mask"); + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "IPv4 exact match mask"); return ENOTSUP; } @@ -416,33 +424,29 @@ enic_copy_item_udp_v1(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; - struct udp_hdr supported_mask = { + struct rte_udp_hdr supported_mask = { .src_port = 0xffff, .dst_port = 0xffff, }; - FLOW_TRACE(); - - if (*inner_ofst) - return ENOTSUP; + ENICPMD_FUNC_TRACE(); if (!mask) mask = &rte_flow_item_udp_mask; /* This is an exact match filter, both ports must be set */ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) { - FLOW_LOG(ERR, "UDP exact match src/dst addr"); + ENICPMD_LOG(ERR, "UDP exact match src/dst addr"); return ENOTSUP; } /* check that the suppied mask exactly matches capabilty */ - if (!mask_exact_match((const u8 *)&supported_mask, - (const u8 *)item->mask, sizeof(*mask))) { - FLOW_LOG(ERR, "UDP exact match mask"); + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "UDP exact match mask"); return ENOTSUP; } @@ -459,33 
+463,29 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4; - struct tcp_hdr supported_mask = { + struct rte_tcp_hdr supported_mask = { .src_port = 0xffff, .dst_port = 0xffff, }; - FLOW_TRACE(); - - if (*inner_ofst) - return ENOTSUP; + ENICPMD_FUNC_TRACE(); if (!mask) mask = &rte_flow_item_tcp_mask; /* This is an exact match filter, both ports must be set */ if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) { - FLOW_LOG(ERR, "TCPIPv4 exact match src/dst addr"); + ENICPMD_LOG(ERR, "TCPIPv4 exact match src/dst addr"); return ENOTSUP; } /* check that the suppied mask exactly matches capabilty */ - if (!mask_exact_match((const u8 *)&supported_mask, - (const u8 *)item->mask, sizeof(*mask))) { - FLOW_LOG(ERR, "TCP exact match mask"); + if (!mask_exact_match((const uint8_t *)&supported_mask, + (const uint8_t *)item->mask, sizeof(*mask))) { + ENICPMD_LOG(ERR, "TCP exact match mask"); return ENOTSUP; } @@ -497,19 +497,157 @@ enic_copy_item_tcp_v1(struct copy_item_args *arg) return 0; } +/* + * The common 'copy' function for all inner packet patterns. Patterns are + * first appended to the L5 pattern buffer. Then, since the NIC filter + * API has no special support for inner packet matching at the moment, + * we set EtherType and IP proto as necessary. + */ +static int +copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst, + const void *val, const void *mask, uint8_t val_size, + uint8_t proto_off, uint16_t proto_val, uint8_t proto_size) +{ + uint8_t *l5_mask, *l5_val; + uint8_t start_off; + + /* No space left in the L5 pattern buffer. */ + start_off = *inner_ofst; + if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN) + return ENOTSUP; + l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask; + l5_val = gp->layer[FILTER_GENERIC_1_L5].val; + /* Copy the pattern into the L5 buffer. */ + if (val) { + memcpy(l5_mask + start_off, mask, val_size); + memcpy(l5_val + start_off, val, val_size); + } + /* Set the protocol field in the previous header. */ + if (proto_off) { + void *m, *v; + + m = l5_mask + proto_off; + v = l5_val + proto_off; + if (proto_size == 1) { + *(uint8_t *)m = 0xff; + *(uint8_t *)v = (uint8_t)proto_val; + } else if (proto_size == 2) { + *(uint16_t *)m = 0xffff; + *(uint16_t *)v = proto_val; + } + } + /* All inner headers land in L5 buffer even if their spec is null. 
*/ + *inner_ofst += val_size; + return 0; +} + +static int +enic_copy_item_inner_eth_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_eth_mask; + arg->l2_proto_off = *off + offsetof(struct rte_ether_hdr, ether_type); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ether_hdr), + 0 /* no previous protocol */, 0, 0); +} + +static int +enic_copy_item_inner_vlan_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + uint8_t eth_type_off; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_vlan_mask; + /* Append vlan header to L5 and set ether type = TPID */ + eth_type_off = arg->l2_proto_off; + arg->l2_proto_off = *off + offsetof(struct rte_vlan_hdr, eth_proto); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_vlan_hdr), + eth_type_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN), 2); +} + +static int +enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_ipv4_mask; + /* Append ipv4 header to L5 and set ether type = ipv4 */ + arg->l3_proto_off = *off + offsetof(struct rte_ipv4_hdr, next_proto_id); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ipv4_hdr), + arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 2); +} + +static int +enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_ipv6_mask; + /* Append ipv6 header to L5 and set ether type = ipv6 */ + arg->l3_proto_off = *off + offsetof(struct rte_ipv6_hdr, proto); + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_ipv6_hdr), + arg->l2_proto_off, rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6), 2); +} + +static int +enic_copy_item_inner_udp_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_udp_mask; + /* Append udp header to L5 and set ip proto = udp */ + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_udp_hdr), + arg->l3_proto_off, IPPROTO_UDP, 1); +} + +static int +enic_copy_item_inner_tcp_v2(struct copy_item_args *arg) +{ + const void *mask = arg->item->mask; + uint8_t *off = arg->inner_ofst; + + ENICPMD_FUNC_TRACE(); + if (!mask) + mask = &rte_flow_item_tcp_mask; + /* Append tcp header to L5 and set ip proto = tcp */ + return copy_inner_common(&arg->filter->u.generic_1, off, + arg->item->spec, mask, sizeof(struct rte_tcp_hdr), + arg->l3_proto_off, IPPROTO_TCP, 1); +} + static int enic_copy_item_eth_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; - struct ether_hdr enic_spec; - struct ether_hdr enic_mask; + struct rte_ether_hdr enic_spec; + struct rte_ether_hdr enic_mask; const struct rte_flow_item_eth *spec = item->spec; const struct rte_flow_item_eth *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Match all if no spec */ if 
(!spec) @@ -519,35 +657,22 @@ enic_copy_item_eth_v2(struct copy_item_args *arg) mask = &rte_flow_item_eth_mask; memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes, - ETHER_ADDR_LEN); + RTE_ETHER_ADDR_LEN); enic_spec.ether_type = spec->type; enic_mask.ether_type = mask->type; - if (*inner_ofst == 0) { - /* outer header */ - memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask, - sizeof(struct ether_hdr)); - memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec, - sizeof(struct ether_hdr)); - } else { - /* inner header */ - if ((*inner_ofst + sizeof(struct ether_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - /* Offset into L5 where inner Ethernet header goes */ - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - &enic_mask, sizeof(struct ether_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - &enic_spec, sizeof(struct ether_hdr)); - *inner_ofst += sizeof(struct ether_hdr); - } + /* outer header */ + memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask, + sizeof(struct rte_ether_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec, + sizeof(struct rte_ether_hdr)); return 0; } @@ -556,12 +681,13 @@ enic_copy_item_vlan_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_vlan *spec = item->spec; const struct rte_flow_item_vlan *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; + struct rte_ether_hdr *eth_mask; + struct rte_ether_hdr *eth_val; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Match all if no spec */ if (!spec) @@ -570,36 +696,35 @@ enic_copy_item_vlan_v2(struct copy_item_args *arg) if (!mask) mask = &rte_flow_item_vlan_mask; - if (*inner_ofst == 0) { - struct ether_hdr *eth_mask = - (void *)gp->layer[FILTER_GENERIC_1_L2].mask; - struct ether_hdr *eth_val = - (void *)gp->layer[FILTER_GENERIC_1_L2].val; - - /* Outer TPID cannot be matched */ - if (eth_mask->ether_type) - return ENOTSUP; - /* - * When packet matching, the VIC always compares vlan-stripped - * L2, regardless of vlan stripping settings. So, the inner type - * from vlan becomes the ether type of the eth header. - */ + eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask; + eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val; + /* Outer TPID cannot be matched */ + if (eth_mask->ether_type) + return ENOTSUP; + /* + * For recent models: + * When packet matching, the VIC always compares vlan-stripped + * L2, regardless of vlan stripping settings. So, the inner type + * from vlan becomes the ether type of the eth header. + * + * Older models w/o hardware vxlan parser have a different + * behavior when vlan stripping is disabled. In this case, + * vlan tag remains in the L2 buffer. + */ + if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) { + struct rte_vlan_hdr *vlan; + + vlan = (struct rte_vlan_hdr *)(eth_mask + 1); + vlan->eth_proto = mask->inner_type; + vlan = (struct rte_vlan_hdr *)(eth_val + 1); + vlan->eth_proto = spec->inner_type; + } else { eth_mask->ether_type = mask->inner_type; eth_val->ether_type = spec->inner_type; - /* For TCI, use the vlan mask/val fields (little endian). 
*/ - gp->mask_vlan = rte_be_to_cpu_16(mask->tci); - gp->val_vlan = rte_be_to_cpu_16(spec->tci); - } else { - /* Inner header. Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct vlan_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct vlan_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct vlan_hdr)); - *inner_ofst += sizeof(struct vlan_hdr); } + /* For TCI, use the vlan mask/val fields (little endian). */ + gp->mask_vlan = rte_be_to_cpu_16(mask->tci); + gp->val_vlan = rte_be_to_cpu_16(spec->tci); return 0; } @@ -608,40 +733,27 @@ enic_copy_item_ipv4_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_ipv4 *spec = item->spec; const struct rte_flow_item_ipv4 *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); - if (*inner_ofst == 0) { - /* Match IPv4 */ - gp->mask_flags |= FILTER_GENERIC_1_IPV4; - gp->val_flags |= FILTER_GENERIC_1_IPV4; + /* Match IPv4 */ + gp->mask_flags |= FILTER_GENERIC_1_IPV4; + gp->val_flags |= FILTER_GENERIC_1_IPV4; - /* Match all if no spec */ - if (!spec) - return 0; + /* Match all if no spec */ + if (!spec) + return 0; - if (!mask) - mask = &rte_flow_item_ipv4_mask; + if (!mask) + mask = &rte_flow_item_ipv4_mask; - memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, - sizeof(struct ipv4_hdr)); - memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, - sizeof(struct ipv4_hdr)); - } else { - /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct ipv4_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct ipv4_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct ipv4_hdr)); - *inner_ofst += sizeof(struct ipv4_hdr); - } + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct rte_ipv4_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct rte_ipv4_hdr)); return 0; } @@ -650,12 +762,11 @@ enic_copy_item_ipv6_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_ipv6 *spec = item->spec; const struct rte_flow_item_ipv6 *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Match IPv6 */ gp->mask_flags |= FILTER_GENERIC_1_IPV6; @@ -668,22 +779,10 @@ enic_copy_item_ipv6_v2(struct copy_item_args *arg) if (!mask) mask = &rte_flow_item_ipv6_mask; - if (*inner_ofst == 0) { - memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, - sizeof(struct ipv6_hdr)); - memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, - sizeof(struct ipv6_hdr)); - } else { - /* Inner IPv6 header. 
Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct ipv6_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct ipv6_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct ipv6_hdr)); - *inner_ofst += sizeof(struct ipv6_hdr); - } + memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr, + sizeof(struct rte_ipv6_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr, + sizeof(struct rte_ipv6_hdr)); return 0; } @@ -692,12 +791,11 @@ enic_copy_item_udp_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_udp *spec = item->spec; const struct rte_flow_item_udp *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Match UDP */ gp->mask_flags |= FILTER_GENERIC_1_UDP; @@ -710,22 +808,10 @@ enic_copy_item_udp_v2(struct copy_item_args *arg) if (!mask) mask = &rte_flow_item_udp_mask; - if (*inner_ofst == 0) { - memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, - sizeof(struct udp_hdr)); - memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, - sizeof(struct udp_hdr)); - } else { - /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct udp_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct udp_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct udp_hdr)); - *inner_ofst += sizeof(struct udp_hdr); - } + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct rte_udp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct rte_udp_hdr)); return 0; } @@ -734,12 +820,11 @@ enic_copy_item_tcp_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_tcp *spec = item->spec; const struct rte_flow_item_tcp *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Match TCP */ gp->mask_flags |= FILTER_GENERIC_1_TCP; @@ -752,22 +837,10 @@ enic_copy_item_tcp_v2(struct copy_item_args *arg) if (!mask) return ENOTSUP; - if (*inner_ofst == 0) { - memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, - sizeof(struct tcp_hdr)); - memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, - sizeof(struct tcp_hdr)); - } else { - /* Inner IPv6 header. 
Mask/Val start at *inner_ofst into L5 */ - if ((*inner_ofst + sizeof(struct tcp_hdr)) > - FILTER_GENERIC_1_KEY_LEN) - return ENOTSUP; - memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst], - mask, sizeof(struct tcp_hdr)); - memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst], - spec, sizeof(struct tcp_hdr)); - *inner_ofst += sizeof(struct tcp_hdr); - } + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, + sizeof(struct rte_tcp_hdr)); + memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, + sizeof(struct rte_tcp_hdr)); return 0; } @@ -776,33 +849,29 @@ enic_copy_item_sctp_v2(struct copy_item_args *arg) { const struct rte_flow_item *item = arg->item; struct filter_v2 *enic_filter = arg->filter; - uint8_t *inner_ofst = arg->inner_ofst; const struct rte_flow_item_sctp *spec = item->spec; const struct rte_flow_item_sctp *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; uint8_t *ip_proto_mask = NULL; uint8_t *ip_proto = NULL; - FLOW_TRACE(); - - if (*inner_ofst) - return ENOTSUP; + ENICPMD_FUNC_TRACE(); /* * The NIC filter API has no flags for "match sctp", so explicitly set * the protocol number in the IP pattern. */ if (gp->val_flags & FILTER_GENERIC_1_IPV4) { - struct ipv4_hdr *ip; - ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; + struct rte_ipv4_hdr *ip; + ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; ip_proto_mask = &ip->next_proto_id; - ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; + ip = (struct rte_ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; ip_proto = &ip->next_proto_id; } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) { - struct ipv6_hdr *ip; - ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; + struct rte_ipv6_hdr *ip; + ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask; ip_proto_mask = &ip->proto; - ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; + ip = (struct rte_ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val; ip_proto = &ip->proto; } else { /* Need IPv4/IPv6 pattern first */ @@ -819,9 +888,9 @@ enic_copy_item_sctp_v2(struct copy_item_args *arg) mask = &rte_flow_item_sctp_mask; memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr, - sizeof(struct sctp_hdr)); + sizeof(struct rte_sctp_hdr)); memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr, - sizeof(struct sctp_hdr)); + sizeof(struct rte_sctp_hdr)); return 0; } @@ -834,12 +903,9 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg) const struct rte_flow_item_vxlan *spec = item->spec; const struct rte_flow_item_vxlan *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - struct udp_hdr *udp; + struct rte_udp_hdr *udp; - FLOW_TRACE(); - - if (*inner_ofst) - return EINVAL; + ENICPMD_FUNC_TRACE(); /* * The NIC filter API has no flags for "match vxlan". 
Set UDP port to @@ -847,9 +913,9 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg) */ gp->mask_flags |= FILTER_GENERIC_1_UDP; gp->val_flags |= FILTER_GENERIC_1_UDP; - udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask; + udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask; udp->dst_port = 0xffff; - udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val; + udp = (struct rte_udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val; udp->dst_port = RTE_BE16(4789); /* Match all if no spec */ if (!spec) @@ -859,11 +925,11 @@ enic_copy_item_vxlan_v2(struct copy_item_args *arg) mask = &rte_flow_item_vxlan_mask; memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask, - sizeof(struct vxlan_hdr)); + sizeof(struct rte_vxlan_hdr)); memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec, - sizeof(struct vxlan_hdr)); + sizeof(struct rte_vxlan_hdr)); - *inner_ofst = sizeof(struct vxlan_hdr); + *inner_ofst = sizeof(struct rte_vxlan_hdr); return 0; } @@ -882,7 +948,7 @@ enic_copy_item_raw_v2(struct copy_item_args *arg) const struct rte_flow_item_raw *mask = item->mask; struct filter_generic_1 *gp = &enic_filter->u.generic_1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); /* Cannot be used for inner packet */ if (*inner_ofst) @@ -894,7 +960,8 @@ enic_copy_item_raw_v2(struct copy_item_args *arg) if (!spec->relative || spec->offset != 0 || spec->search || spec->limit) return EINVAL; /* Need non-null pattern that fits within the NIC's filter pattern */ - if (spec->length == 0 || spec->length > FILTER_GENERIC_1_KEY_LEN || + if (spec->length == 0 || + spec->length + sizeof(struct rte_udp_hdr) > FILTER_GENERIC_1_KEY_LEN || !spec->pattern || !mask->pattern) return EINVAL; /* @@ -907,9 +974,9 @@ enic_copy_item_raw_v2(struct copy_item_args *arg) */ if (mask->length != 0 && mask->length < spec->length) return EINVAL; - memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr), + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr), mask->pattern, spec->length); - memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr), + memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr), spec->pattern, spec->length); return 0; @@ -928,11 +995,11 @@ enic_copy_item_raw_v2(struct copy_item_args *arg) */ static int item_stacking_valid(enum rte_flow_item_type prev_item, - const struct enic_items *item_info, u8 is_first_item) + const struct enic_items *item_info, uint8_t is_first_item) { enum rte_flow_item_type const *allowed_items = item_info->prev_items; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) { if (prev_item == *allowed_items) @@ -961,11 +1028,11 @@ fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp, if (!(inner_ofst > 0 && enic->vxlan)) return; - FLOW_TRACE(); - vxlan = sizeof(struct vxlan_hdr); - memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr), + ENICPMD_FUNC_TRACE(); + vxlan = sizeof(struct rte_vxlan_hdr); + memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct rte_udp_hdr), gp->layer[FILTER_GENERIC_1_L5].mask, vxlan); - memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr), + memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct rte_udp_hdr), gp->layer[FILTER_GENERIC_1_L5].val, vxlan); inner = inner_ofst - vxlan; memset(layer, 0, sizeof(layer)); @@ -996,18 +1063,20 @@ enic_copy_filter(const struct rte_flow_item pattern[], { int ret; const struct rte_flow_item *item = pattern; - u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */ + uint8_t inner_ofst = 0; 
/* If encapsulated, ofst into L5 */ enum rte_flow_item_type prev_item; const struct enic_items *item_info; struct copy_item_args args; - u8 is_first_item = 1; + enic_copy_item_fn *copy_fn; + uint8_t is_first_item = 1; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); prev_item = 0; args.filter = enic_filter; args.inner_ofst = &inner_ofst; + args.enic = enic; for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) { /* Get info about how to validate and copy the item. If NULL * is returned the nic does not support the item. @@ -1017,7 +1086,8 @@ enic_copy_filter(const struct rte_flow_item pattern[], item_info = &cap->item_info[item->type]; if (item->type > cap->max_item_type || - item_info->copy_item == NULL) { + item_info->copy_item == NULL || + (inner_ofst > 0 && item_info->inner_copy_item == NULL)) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, NULL, "Unsupported item."); @@ -1029,7 +1099,9 @@ enic_copy_filter(const struct rte_flow_item pattern[], goto stacking_error; args.item = item; - ret = item_info->copy_item(&args); + copy_fn = inner_ofst > 0 ? item_info->inner_copy_item : + item_info->copy_item; + ret = copy_fn(&args); if (ret) goto item_not_supported; prev_item = item->type; @@ -1067,7 +1139,7 @@ enic_copy_action_v1(__rte_unused struct enic *enic, enum { FATE = 1, }; uint32_t overlap = 0; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) @@ -1115,7 +1187,7 @@ enic_copy_action_v2(struct enic *enic, uint32_t overlap = 0; bool passthru = false; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { switch (actions->type) { @@ -1171,10 +1243,6 @@ enic_copy_action_v2(struct enic *enic, enic_action->flags |= FILTER_ACTION_DROP_FLAG; break; } - case RTE_FLOW_ACTION_TYPE_COUNT: { - enic_action->flags |= FILTER_ACTION_COUNTER_FLAG; - break; - } case RTE_FLOW_ACTION_TYPE_RSS: { const struct rte_flow_action_rss *rss = (const struct rte_flow_action_rss *) @@ -1266,9 +1334,7 @@ enic_get_action_cap(struct enic *enic) uint8_t actions; actions = enic->filter_actions; - if (actions & FILTER_ACTION_COUNTER_FLAG) - ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG]; - else if (actions & FILTER_ACTION_DROP_FLAG) + if (actions & FILTER_ACTION_DROP_FLAG) ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG]; else if (actions & FILTER_ACTION_FILTER_ID_FLAG) ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG]; @@ -1282,14 +1348,14 @@ static void enic_dump_actions(const struct filter_action_v2 *ea) { if (ea->type == FILTER_ACTION_RQ_STEERING) { - FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx); + ENICPMD_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx); } else if (ea->type == FILTER_ACTION_V2) { - FLOW_LOG(INFO, "Actions(V2)\n"); + ENICPMD_LOG(INFO, "Actions(V2)\n"); if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG) - FLOW_LOG(INFO, "\tqueue: %u\n", + ENICPMD_LOG(INFO, "\tqueue: %u\n", enic_sop_rq_idx_to_rte_idx(ea->rq_idx)); if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG) - FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id); + ENICPMD_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id); } } @@ -1305,13 +1371,13 @@ enic_dump_filter(const struct filter_v2 *filt) switch (filt->type) { case FILTER_IPV4_5TUPLE: - FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n"); + ENICPMD_LOG(INFO, "FILTER_IPV4_5TUPLE\n"); break; case FILTER_USNIC_IP: case FILTER_DPDK_1: /* FIXME: this should be a loop */ gp = &filt->u.generic_1; - FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n", + 
ENICPMD_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n", gp->val_vlan, gp->mask_vlan); if (gp->mask_flags & FILTER_GENERIC_1_IPV4) @@ -1369,7 +1435,7 @@ enic_dump_filter(const struct filter_v2 *filt) ? "ipfrag(y)" : "ipfrag(n)"); else sprintf(ipfrag, "%s ", "ipfrag(x)"); - FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp, + ENICPMD_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp, tcp, tcpudp, ip4csum, l4csum, ipfrag); for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) { @@ -1386,7 +1452,7 @@ enic_dump_filter(const struct filter_v2 *filt) bp += 2; } *bp = '\0'; - FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf); + ENICPMD_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf); bp = buf; for (j = 0; j <= mbyte; j++) { sprintf(bp, "%02x", @@ -1394,11 +1460,11 @@ enic_dump_filter(const struct filter_v2 *filt) bp += 2; } *bp = '\0'; - FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf); + ENICPMD_LOG(INFO, "\tL%u val: %s\n", i + 2, buf); } break; default: - FLOW_LOG(INFO, "FILTER UNKNOWN\n"); + ENICPMD_LOG(INFO, "FILTER UNKNOWN\n"); break; } } @@ -1440,7 +1506,7 @@ enic_flow_parse(struct rte_eth_dev *dev, const struct enic_action_cap *enic_action_cap; const struct rte_flow_action *action; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); memset(enic_filter, 0, sizeof(*enic_filter)); memset(enic_action, 0, sizeof(*enic_action)); @@ -1553,10 +1619,8 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, struct rte_flow *flow; int err; uint16_t entry; - int ctr_idx; - int last_max_flow_ctr; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); flow = rte_calloc(__func__, 1, sizeof(*flow), 0); if (!flow) { @@ -1565,34 +1629,6 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, return NULL; } - flow->counter_idx = -1; - last_max_flow_ctr = -1; - if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) { - if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) { - rte_flow_error_set(error, ENOMEM, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "cannot allocate counter"); - goto unwind_flow_alloc; - } - flow->counter_idx = ctr_idx; - enic_action->counter_index = ctr_idx; - - /* If index is the largest, increase the counter DMA size */ - if (ctr_idx > enic->max_flow_counter) { - err = vnic_dev_counter_dma_cfg(enic->vdev, - VNIC_FLOW_COUNTER_UPDATE_MSECS, - ctr_idx + 1); - if (err) { - rte_flow_error_set(error, -err, - RTE_FLOW_ERROR_TYPE_ACTION_CONF, - NULL, "counter DMA config failed"); - goto unwind_ctr_alloc; - } - last_max_flow_ctr = enic->max_flow_counter; - enic->max_flow_counter = ctr_idx; - } - } - /* entry[in] is the queue id, entry[out] is the filter Id for delete */ entry = enic_action->rq_idx; err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter, @@ -1600,29 +1636,13 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter, if (err) { rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE, NULL, "vnic_dev_classifier error"); - goto unwind_ctr_dma_cfg; + rte_free(flow); + return NULL; } flow->enic_filter_id = entry; flow->enic_filter = *enic_filter; - return flow; - -/* unwind if there are errors */ -unwind_ctr_dma_cfg: - if (last_max_flow_ctr != -1) { - /* reduce counter DMA size */ - vnic_dev_counter_dma_cfg(enic->vdev, - VNIC_FLOW_COUNTER_UPDATE_MSECS, - last_max_flow_ctr + 1); - enic->max_flow_counter = last_max_flow_ctr; - } -unwind_ctr_alloc: - if (flow->counter_idx != -1) - vnic_dev_counter_free(enic->vdev, ctr_idx); -unwind_flow_alloc: - rte_free(flow); - return NULL; } /** @@ -1640,10 +1660,10 @@ static int enic_flow_del_filter(struct 
enic *enic, struct rte_flow *flow, struct rte_flow_error *error) { - u16 filter_id; + uint16_t filter_id; int err; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); filter_id = flow->enic_filter_id; err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL); @@ -1652,13 +1672,6 @@ enic_flow_del_filter(struct enic *enic, struct rte_flow *flow, NULL, "vnic_dev_classifier failed"); return -err; } - - if (flow->counter_idx != -1) { - if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx)) - dev_err(enic, "counter free failed, idx: %d\n", - flow->counter_idx); - flow->counter_idx = -1; - } return 0; } @@ -1682,7 +1695,7 @@ enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs, struct filter_action_v2 enic_action; int ret; - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter, &enic_action); @@ -1710,19 +1723,17 @@ enic_flow_create(struct rte_eth_dev *dev, struct rte_flow *flow; struct enic *enic = pmd_priv(dev); - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter, &enic_action); if (ret < 0) return NULL; - rte_spinlock_lock(&enic->flows_lock); flow = enic_flow_add_filter(enic, &enic_filter, &enic_action, error); if (flow) LIST_INSERT_HEAD(&enic->flows, flow, next); - rte_spinlock_unlock(&enic->flows_lock); return flow; } @@ -1739,12 +1750,10 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow, { struct enic *enic = pmd_priv(dev); - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); - rte_spinlock_lock(&enic->flows_lock); enic_flow_del_filter(enic, flow, error); LIST_REMOVE(flow, next); - rte_spinlock_unlock(&enic->flows_lock); rte_free(flow); return 0; } @@ -1761,9 +1770,8 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) struct rte_flow *flow; struct enic *enic = pmd_priv(dev); - FLOW_TRACE(); + ENICPMD_FUNC_TRACE(); - rte_spinlock_lock(&enic->flows_lock); while (!LIST_EMPTY(&enic->flows)) { flow = LIST_FIRST(&enic->flows); @@ -1771,70 +1779,6 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error) LIST_REMOVE(flow, next); rte_free(flow); } - rte_spinlock_unlock(&enic->flows_lock); - return 0; -} - -static int -enic_flow_query_count(struct rte_eth_dev *dev, - struct rte_flow *flow, void *data, - struct rte_flow_error *error) -{ - struct enic *enic = pmd_priv(dev); - struct rte_flow_query_count *query; - uint64_t packets, bytes; - - FLOW_TRACE(); - - if (flow->counter_idx == -1) { - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "flow does not have counter"); - } - query = (struct rte_flow_query_count *)data; - if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx, - !!query->reset, &packets, &bytes)) { - return rte_flow_error_set - (error, EINVAL, - RTE_FLOW_ERROR_TYPE_UNSPECIFIED, - NULL, - "cannot read counter"); - } - query->hits_set = 1; - query->bytes_set = 1; - query->hits = packets; - query->bytes = bytes; - return 0; -} - -static int -enic_flow_query(struct rte_eth_dev *dev, - struct rte_flow *flow, - const struct rte_flow_action *actions, - void *data, - struct rte_flow_error *error) -{ - int ret = 0; - - FLOW_TRACE(); - - for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) { - switch (actions->type) { - case RTE_FLOW_ACTION_TYPE_VOID: - break; - case RTE_FLOW_ACTION_TYPE_COUNT: - ret = enic_flow_query_count(dev, flow, data, error); - break; - default: - return rte_flow_error_set(error, ENOTSUP, - RTE_FLOW_ERROR_TYPE_ACTION, - actions, - 
"action not supported"); - } - if (ret < 0) - return ret; - } return 0; } @@ -1848,5 +1792,4 @@ const struct rte_flow_ops enic_flow_ops = { .create = enic_flow_create, .destroy = enic_flow_destroy, .flush = enic_flow_flush, - .query = enic_flow_query, };