* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*/
-#include <libgen.h>
-
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
#include <rte_tcp.h>
#include <rte_udp.h>
#include <rte_sctp.h>
-#include <rte_eth_ctrl.h>
#include "enic_compat.h"
#include "enic.h"
#define ENICPMD_CLSF_HASH_ENTRIES ENICPMD_FDIR_MAX
+static void copy_fltr_v1(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+static void copy_fltr_v2(struct filter_v2 *fltr,
+ const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks);
+
void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
{
*stats = enic->fdir.stats;
void enic_fdir_info(struct enic *enic)
{
- enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.modes = (uint32_t)RTE_FDIR_MODE_PERFECT;
enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
if (enic->adv_filters) {
 /* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
  * without advanced filter support).
  */
-void
-copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- __rte_unused struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v1(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ __rte_unused const struct rte_eth_fdir_masks *masks)
{
fltr->type = FILTER_IPV4_5TUPLE;
fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
 /* Copy Flow Director filter to a VIC generic filter (requires advanced
  * filter support).
  */
-void
-copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
- struct rte_eth_fdir_masks *masks)
+static void
+copy_fltr_v2(struct filter_v2 *fltr, const struct rte_eth_fdir_input *input,
+ const struct rte_eth_fdir_masks *masks)
{
struct filter_generic_1 *gp = &fltr->u.generic_1;
- int i;
fltr->type = FILTER_DPDK_1;
memset(gp, 0, sizeof(*gp));
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
- struct udp_hdr udp_mask, udp_val;
+ struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
- &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
- struct tcp_hdr tcp_mask, tcp_val;
+ struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
- &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
- struct sctp_hdr sctp_mask, sctp_val;
+ struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
sctp_val.tag = input->flow.sctp4_flow.verify_tag;
}
- /* v4 proto should be 132, override ip4_flow.proto */
- input->flow.ip4_flow.proto = 132;
-
+ /*
+ * Unlike UDP/TCP (FILTER_GENERIC_1_{UDP,TCP}), the firmware
+ * has no "packet is SCTP" flag. Use flag=0 (generic L4) and
+ * manually set proto_id=sctp below.
+ */
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
- &sctp_val, sizeof(struct sctp_hdr));
+ &sctp_val, sizeof(struct rte_sctp_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
- struct ipv4_hdr ip4_mask, ip4_val;
- memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
- memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+ struct rte_ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct rte_ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct rte_ipv4_hdr));
if (input->flow.ip4_flow.tos) {
ip4_mask.type_of_service = masks->ipv4_mask.tos;
if (input->flow.ip4_flow.proto) {
ip4_mask.next_proto_id = masks->ipv4_mask.proto;
ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ /* Explicitly match the SCTP protocol number */
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = IPPROTO_SCTP;
}
if (input->flow.ip4_flow.src_ip) {
ip4_mask.src_addr = masks->ipv4_mask.src_ip;
}
enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
- &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ &ip4_mask, &ip4_val, sizeof(struct rte_ipv4_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
- struct udp_hdr udp_mask, udp_val;
+ struct rte_udp_hdr udp_mask, udp_val;
memset(&udp_mask, 0, sizeof(udp_mask));
memset(&udp_val, 0, sizeof(udp_val));
udp_val.dst_port = input->flow.udp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
- &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ &udp_mask, &udp_val, sizeof(struct rte_udp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
- struct tcp_hdr tcp_mask, tcp_val;
+ struct rte_tcp_hdr tcp_mask, tcp_val;
memset(&tcp_mask, 0, sizeof(tcp_mask));
memset(&tcp_val, 0, sizeof(tcp_val));
tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
}
enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
- &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ &tcp_mask, &tcp_val, sizeof(struct rte_tcp_hdr));
} else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
- struct sctp_hdr sctp_mask, sctp_val;
+ struct rte_sctp_hdr sctp_mask, sctp_val;
memset(&sctp_mask, 0, sizeof(sctp_mask));
memset(&sctp_val, 0, sizeof(sctp_val));
sctp_val.tag = input->flow.sctp6_flow.verify_tag;
}
- /* v4 proto should be 132, override ipv6_flow.proto */
- input->flow.ipv6_flow.proto = 132;
-
enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
- &sctp_val, sizeof(struct sctp_hdr));
+ &sctp_val, sizeof(struct rte_sctp_hdr));
}
if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
- struct ipv6_hdr ipv6_mask, ipv6_val;
- memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
- memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+ struct rte_ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct rte_ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct rte_ipv6_hdr));
if (input->flow.ipv6_flow.proto) {
ipv6_mask.proto = masks->ipv6_mask.proto;
ipv6_val.proto = input->flow.ipv6_flow.proto;
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ /* See comments for IPv4 SCTP above. */
+ ipv6_mask.proto = 0xff;
+ ipv6_val.proto = IPPROTO_SCTP;
}
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.src_addr[i * 4] =
- input->flow.ipv6_flow.src_ip[i];
- }
- for (i = 0; i < 4; i++) {
- *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
- masks->ipv6_mask.src_ip[i];
- *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
- input->flow.ipv6_flow.dst_ip[i];
- }
+ memcpy(ipv6_mask.src_addr, masks->ipv6_mask.src_ip,
+ sizeof(ipv6_mask.src_addr));
+ memcpy(ipv6_val.src_addr, input->flow.ipv6_flow.src_ip,
+ sizeof(ipv6_val.src_addr));
+ memcpy(ipv6_mask.dst_addr, masks->ipv6_mask.dst_ip,
+ sizeof(ipv6_mask.dst_addr));
+ memcpy(ipv6_val.dst_addr, input->flow.ipv6_flow.dst_ip,
+ sizeof(ipv6_val.dst_addr));
if (input->flow.ipv6_flow.tc) {
ipv6_mask.vtc_flow = masks->ipv6_mask.tc << 12;
ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
}
enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
- &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ &ipv6_mask, &ipv6_val, sizeof(struct rte_ipv6_hdr));
}
}
struct enic_fdir_node *key;
struct filter_v2 fltr;
int32_t pos;
- u8 do_free = 0;
- u16 old_fltr_id = 0;
- u32 flowtype_supported;
- u16 flex_bytes;
- u16 queue;
+ uint8_t do_free = 0;
+ uint16_t old_fltr_id = 0;
+ uint32_t flowtype_supported;
+ uint16_t flex_bytes;
+ uint16_t queue;
struct filter_action_v2 action;
memset(&fltr, 0, sizeof(fltr));
void enic_clsf_destroy(struct enic *enic)
{
- u32 index;
+ uint32_t index;
struct enic_fdir_node *key;
/* delete classifier entries */
for (index = 0; index < ENICPMD_FDIR_MAX; index++) {