X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_clsf.c;h=e206123ba53fe008bb8a626322dc3b8e8f2ac5ae;hb=c47d6e83334e656f85e4bb6881cf63da38276b0a;hp=3ef1d083222a9a90a11b13cb70c9b6cfe6b1115c;hpb=ffc905f3b856b96c6d8d864dba4052104fae4064;p=dpdk.git

diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c
index 3ef1d08322..e206123ba5 100644
--- a/drivers/net/enic/enic_clsf.c
+++ b/drivers/net/enic/enic_clsf.c
@@ -3,8 +3,6 @@
  * Copyright 2007 Nuova Systems, Inc. All rights reserved.
  */
 
-#include
-
 #include
 #include
 #include
@@ -13,7 +11,6 @@
 #include
 #include
 #include
-#include
 
 #include "enic_compat.h"
 #include "enic.h"
@@ -38,6 +35,13 @@
 
 #define ENICPMD_CLSF_HASH_ENTRIES	ENICPMD_FDIR_MAX
 
+static void copy_fltr_v1(struct filter_v2 *fltr,
+		const struct rte_eth_fdir_input *input,
+		const struct rte_eth_fdir_masks *masks);
+static void copy_fltr_v2(struct filter_v2 *fltr,
+		const struct rte_eth_fdir_input *input,
+		const struct rte_eth_fdir_masks *masks);
+
 void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
 {
 	*stats = enic->fdir.stats;
@@ -51,7 +55,7 @@ void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
 
 void enic_fdir_info(struct enic *enic)
 {
-	enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+	enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
 	enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
 				1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
 	if (enic->adv_filters) {
@@ -81,9 +85,9 @@ enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
 /* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
  * without advanced filter support.
  */
-void
-copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
-	     __rte_unused struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+	     __rte_unused const struct rte_eth_fdir_masks *masks)
 {
 	fltr->type = FILTER_IPV4_5TUPLE;
 	fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
@@ -106,18 +110,17 @@ copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 /* Copy Flow Director filter to a VIC generic filter (requires advanced
  * filter support.
 */
-void
-copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
-	     struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+	     const struct rte_eth_fdir_masks *masks)
 {
 	struct filter_generic_1 *gp = &fltr->u.generic_1;
-	int i;
 
 	fltr->type = FILTER_DPDK_1;
 	memset(gp, 0, sizeof(*gp));
 
 	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
-		struct udp_hdr udp_mask, udp_val;
+		struct rte_udp_hdr udp_mask, udp_val;
 		memset(&udp_mask, 0, sizeof(udp_mask));
 		memset(&udp_val, 0, sizeof(udp_val));
 
@@ -131,9 +134,9 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 		}
 
 		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
-			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
+			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
 	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
-		struct tcp_hdr tcp_mask, tcp_val;
+		struct rte_tcp_hdr tcp_mask, tcp_val;
 		memset(&tcp_mask, 0, sizeof(tcp_mask));
 		memset(&tcp_val, 0, sizeof(tcp_val));
 
@@ -147,9 +150,9 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 		}
 
 		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
-			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
 	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
-		struct sctp_hdr sctp_mask, sctp_val;
+		struct rte_sctp_hdr sctp_mask, sctp_val;
 		memset(&sctp_mask, 0, sizeof(sctp_mask));
 		memset(&sctp_val, 0, sizeof(sctp_val));
 
@@ -166,20 +169,22 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 			sctp_val.tag = input->flow.sctp4_flow.verify_tag;
 		}
 
-		/* v4 proto should be 132, override ip4_flow.proto */
-		input->flow.ip4_flow.proto = 132;
-
+		/*
+		 * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
+		 * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
+		 * manually set proto_id=sctp below.
+		 */
 		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
-			       &sctp_val, sizeof(struct sctp_hdr));
+			       &sctp_val, sizeof(struct rte_sctp_hdr));
 	}
 
 	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
-		struct ipv4_hdr ip4_mask, ip4_val;
-		memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
-		memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+		struct rte_ipv4_hdr ip4_mask, ip4_val;
+		memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
+		memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));
 
 		if (input->flow.ip4_flow.tos) {
 			ip4_mask.type_of_service = masks->ipv4_mask.tos;
@@ -192,6 +197,10 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 		if (input->flow.ip4_flow.proto) {
 			ip4_mask.next_proto_id = masks->ipv4_mask.proto;
 			ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+			/* Explicitly match the SCTP protocol number */
+			ip4_mask.next_proto_id = 0xff;
+			ip4_val.next_proto_id = IPPROTO_SCTP;
 		}
 		if (input->flow.ip4_flow.src_ip) {
 			ip4_mask.src_addr = masks->ipv4_mask.src_ip;
@@ -203,11 +212,11 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 		}
 
 		enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
-			       &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+			       &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
 	}
 
 	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
-		struct udp_hdr udp_mask, udp_val;
+		struct rte_udp_hdr udp_mask, udp_val;
 		memset(&udp_mask, 0, sizeof(udp_mask));
 		memset(&udp_val, 0, sizeof(udp_val));
 
@@ -220,9 +229,9 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 			udp_val.dst_port = input->flow.udp6_flow.dst_port;
 		}
 		enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
-			       &udp_mask, &udp_val, sizeof(struct udp_hdr));
+			       &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
 	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
-		struct tcp_hdr tcp_mask, tcp_val;
+		struct rte_tcp_hdr tcp_mask, tcp_val;
 		memset(&tcp_mask, 0, sizeof(tcp_mask));
 		memset(&tcp_val, 0, sizeof(tcp_val));
 
@@ -235,9 +244,9 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 			tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
 		}
 		enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
-			       &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+			       &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
 	} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
-		struct sctp_hdr sctp_mask, sctp_val;
+		struct rte_sctp_hdr sctp_mask, sctp_val;
 		memset(&sctp_mask, 0, sizeof(sctp_mask));
 		memset(&sctp_val, 0, sizeof(sctp_val));
 
@@ -254,37 +263,34 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 			sctp_val.tag = input->flow.sctp6_flow.verify_tag;
 		}
 
-		/* v4 proto should be 132, override ipv6_flow.proto */
-		input->flow.ipv6_flow.proto = 132;
-
 		enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
-			       &sctp_val, sizeof(struct sctp_hdr));
+			       &sctp_val, sizeof(struct rte_sctp_hdr));
 	}
 
 	if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
 	    input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
-		struct ipv6_hdr ipv6_mask, ipv6_val;
-		memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
-		memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+		struct rte_ipv6_hdr ipv6_mask, ipv6_val;
+		memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
+		memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));
 
 		if (input->flow.ipv6_flow.proto) {
 			ipv6_mask.proto = masks->ipv6_mask.proto;
 			ipv6_val.proto = input->flow.ipv6_flow.proto;
+		} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+			/* See comments for IPv4 SCTP above. */
+			ipv6_mask.proto = 0xff;
+			ipv6_val.proto = IPPROTO_SCTP;
 		}
-		for (i = 0; i < 4; i++) {
-			*(uint32_t *)&ipv6_mask.src_addr[i * 4] =
-				masks->ipv6_mask.src_ip[i];
-			*(uint32_t *)&ipv6_val.src_addr[i * 4] =
-				input->flow.ipv6_flow.src_ip[i];
-		}
-		for (i = 0; i < 4; i++) {
-			*(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
-				masks->ipv6_mask.src_ip[i];
-			*(uint32_t *)&ipv6_val.dst_addr[i * 4] =
-				input->flow.ipv6_flow.dst_ip[i];
-		}
+		memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+		       sizeof(ipv6_mask.src_addr));
+		memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+		       sizeof(ipv6_val.src_addr));
+		memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+		       sizeof(ipv6_mask.dst_addr));
+		memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+		       sizeof(ipv6_val.dst_addr));
 		if (input->flow.ipv6_flow.tc) {
 			ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
 			ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
@@ -295,7 +301,7 @@ copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
 		}
 
 		enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
-			       &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+			       &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
 	}
 }
 
@@ -331,11 +337,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 	struct enic_fdir_node *key;
 	struct filter_v2 fltr;
 	int32_t pos;
-	u8 do_free = 0;
-	u16 old_fltr_id = 0;
-	u32 flowtype_supported;
-	u16 flex_bytes;
-	u16 queue;
+	uint8_t do_free = 0;
+	uint16_t old_fltr_id = 0;
+	uint32_t flowtype_supported;
+	uint16_t flex_bytes;
+	uint16_t queue;
 	struct filter_action_v2 action;
 
 	memset(&fltr, 0, sizeof(fltr));
@@ -458,7 +464,7 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
 
 void enic_clsf_destroy(struct enic *enic)
 {
-	u32 index;
+	uint32_t index;
 	struct enic_fdir_node *key;
 	/* delete classifier entries */
 	for (index = 0; index < ENICPMD_FDIR_MAX; index++) {
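Note (not part of the patch): the sketch below is a minimal, standalone illustration of the SCTP handling introduced above. Because the VIC firmware has no "packet is SCTP" flag, the protocol number is matched explicitly in the L3 header, and only in the local mask/value copies rather than by overwriting the caller's (now const) input. The struct and function names here (ip4_match, set_l3_proto_match) are hypothetical, not enic PMD identifiers; IPPROTO_SCTP (132) is the standard IANA value.

/* Standalone sketch only -- illustrative names, not enic PMD code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#ifndef IPPROTO_SCTP
#define IPPROTO_SCTP 132	/* IANA protocol number for SCTP */
#endif

/* Simplified stand-in for the next_proto_id field of an IPv4 header match. */
struct ip4_match {
	uint8_t next_proto_id;
};

/*
 * Mirror of the patched logic: honour an explicit protocol match from the
 * caller, otherwise force an SCTP match for SCTP flow types, and never
 * touch the caller's input structure.
 */
static void
set_l3_proto_match(struct ip4_match *mask, struct ip4_match *val,
		   uint8_t user_proto, uint8_t user_proto_mask,
		   int is_sctp_flow)
{
	if (user_proto) {
		/* Caller asked for an explicit protocol match. */
		mask->next_proto_id = user_proto_mask;
		val->next_proto_id = user_proto;
	} else if (is_sctp_flow) {
		/* No firmware "is SCTP" flag, so match the IP proto field. */
		mask->next_proto_id = 0xff;
		val->next_proto_id = IPPROTO_SCTP;
	}
}

int main(void)
{
	struct ip4_match mask, val;

	memset(&mask, 0, sizeof(mask));
	memset(&val, 0, sizeof(val));
	/* SCTP flow with no user-supplied protocol: expect mask 0xff, proto 132. */
	set_l3_proto_match(&mask, &val, 0, 0, 1);
	printf("mask 0x%02x, proto %u\n", mask.next_proto_id, val.next_proto_id);
	return 0;
}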