#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
-#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
+#include "base/ixgbe_osdep.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
- struct rte_eth_l2_tunnel_conf filter_info;
+ struct ixgbe_l2_tunnel_conf filter_info;
+};
+/* rss filter list structure */
+struct ixgbe_rss_conf_ele {
+ TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
+ struct ixgbe_rte_flow_rss_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+static struct ixgbe_rss_filter_list filter_rss_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
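/*
 * A minimal sketch of how a parsed filter is attached to these lists
 * (illustrative only; rss_conf stands for an already-parsed
 * struct ixgbe_rte_flow_rss_conf):
 *
 *	struct ixgbe_rss_conf_ele *ele;
 *
 *	ele = rte_zmalloc("ixgbe_rss_filter", sizeof(*ele), 0);
 *	if (ele != NULL) {
 *		ixgbe_rss_conf_init(&ele->filter_info, &rss_conf.conf);
 *		TAILQ_INSERT_TAIL(&filter_rss_list, ele, entries);
 *	}
 */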
/**
}
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
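/*
 * A short illustration of the assumption above (a sketch, not parser
 * code): spec and mask values inside pattern items are big endian,
 * while attribute and action fields stay in CPU order.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_attr attr = {
 *		.ingress = 1,
 *		.priority = 1,
 *	};
 */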
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
/**
* Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
*/
}
/* Skip Ethernet */
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
}
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
/*Not supported last point for range*/
if (item->last) {
rte_flow_error_set(error,
return -rte_errno;
}
- ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
/**
* Only support src & dst addresses, protocol,
* others should be masked.
item, "Not supported by ntuple filter");
return -rte_errno;
}
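+	/*
+	 * The address and protocol masks must be all-ones or all-zeroes;
+	 * e.g. a typical accepted mask sets hdr.src_addr, hdr.dst_addr
+	 * and hdr.next_proto_id fully and leaves every other field zero.
+	 */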
+ if ((ipv4_mask->hdr.src_addr != 0 &&
+ ipv4_mask->hdr.src_addr != UINT32_MAX) ||
+ (ipv4_mask->hdr.dst_addr != 0 &&
+ ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
+	    (ipv4_mask->hdr.next_proto_id != 0 &&
+	     ipv4_mask->hdr.next_proto_id != UINT8_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
filter->src_ip_mask = ipv4_mask->hdr.src_addr;
filter->proto_mask = ipv4_mask->hdr.next_proto_id;
- ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
filter->dst_ip = ipv4_spec->hdr.dst_addr;
filter->src_ip = ipv4_spec->hdr.src_addr;
filter->proto = ipv4_spec->hdr.next_proto_id;
}
if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
/**
* Only support src & dst ports, tcp flags,
item, "Not supported by ntuple filter");
return -rte_errno;
}
+ if ((tcp_mask->hdr.src_port != 0 &&
+ tcp_mask->hdr.src_port != UINT16_MAX) ||
+ (tcp_mask->hdr.dst_port != 0 &&
+ tcp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = tcp_mask->hdr.dst_port;
filter->src_port_mask = tcp_mask->hdr.src_port;
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
filter->dst_port = tcp_spec->hdr.dst_port;
filter->src_port = tcp_spec->hdr.src_port;
filter->tcp_flags = tcp_spec->hdr.tcp_flags;
} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
/**
* Only support src & dst ports,
item, "Not supported by ntuple filter");
return -rte_errno;
}
+ if ((udp_mask->hdr.src_port != 0 &&
+ udp_mask->hdr.src_port != UINT16_MAX) ||
+ (udp_mask->hdr.dst_port != 0 &&
+ udp_mask->hdr.dst_port != UINT16_MAX)) {
+ rte_flow_error_set(error,
+ EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
+ item, "Not supported by ntuple filter");
+ return -rte_errno;
+ }
filter->dst_port_mask = udp_mask->hdr.dst_port;
filter->src_port_mask = udp_mask->hdr.src_port;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
filter->dst_port = udp_spec->hdr.dst_port;
filter->src_port = udp_spec->hdr.src_port;
} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
- sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
/**
* Only support src & dst ports,
filter->dst_port_mask = sctp_mask->hdr.dst_port;
filter->src_port_mask = sctp_mask->hdr.src_port;
- sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
filter->dst_port = sctp_spec->hdr.dst_port;
filter->src_port = sctp_spec->hdr.src_port;
} else {
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
if (ret)
return ret;
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
/* ESP flow not really a flow*/
if (filter->proto == IPPROTO_ESP)
return 0;
return -rte_errno;
}
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_spec = item->spec;
+ eth_mask = item->mask;
/* Mask bits of source MAC address must be full of 0.
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!is_zero_ether_addr(ð_mask->src) ||
- (!is_zero_ether_addr(ð_mask->dst) &&
- !is_broadcast_ether_addr(ð_mask->dst))) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ (!rte_is_zero_ether_addr(ð_mask->dst) &&
+ !rte_is_broadcast_ether_addr(ð_mask->dst))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (is_broadcast_ether_addr(ð_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
return -rte_errno;
}
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
if (ret)
return ret;
- /* Ixgbe doesn't support MAC address. */
- if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
- memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "Not supported by ethertype filter");
- return -rte_errno;
- }
-
if (filter->queue >= dev->data->nb_rx_queues) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
return -rte_errno;
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
return -rte_errno;
}
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
- if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ tcp_spec = item->spec;
+ tcp_mask = item->mask;
+ if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *filter,
+ struct ixgbe_l2_tunnel_conf *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
/* The first not void item should be e-tag. */
item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
}
if (!item->spec || !item->mask) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
return -rte_errno;
return -rte_errno;
}
- e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
- e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
+ e_tag_spec = item->spec;
+ e_tag_mask = item->mask;
/* Only care about GRP and E cid base. */
if (e_tag_mask->epcp_edei_in_ecid_b ||
e_tag_mask->in_ecid_e ||
e_tag_mask->ecid_e ||
e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
return -rte_errno;
}
- filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
+ filter->l2_tunnel_type = RTE_ETH_L2_TUNNEL_TYPE_E_TAG;
/**
* grp and e_cid_base are bit fields and only use 14 bits.
* e-tag id is taken as little endian by HW.
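+ * E.g., given the rsvd_grp_ecid_b mask of 0x3FFF enforced above, a
+ * sketch of the resulting extraction is:
+ *   tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);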
/* check if the next not void item is END */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by L2 tunnel filter");
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Not support priority.");
act = next_no_void_action(actions, NULL);
if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
act->type != RTE_FLOW_ACTION_TYPE_PF) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
/* check if the next not void item is END */
act = next_no_void_action(actions, act);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_l2_tunnel_conf *l2_tn_filter,
+ struct ixgbe_l2_tunnel_conf *l2_tn_filter,
struct rte_flow_error *error)
{
int ret = 0;
if (hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a) {
- memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "Not supported by L2 tunnel filter");
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* not supported */
if (attr->priority) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
break;
if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
- spec =
- (const struct rte_flow_item_fuzzy *)item->spec;
- last =
- (const struct rte_flow_item_fuzzy *)item->last;
- mask =
- (const struct rte_flow_item_fuzzy *)item->mask;
+ spec = item->spec;
+ last = item->last;
+ mask = item->mask;
if (!spec || !mask)
return 0;
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
if (item->mask) {
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type ||
* src MAC address must be masked,
* and don't support dst MAC address mask.
*/
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j] ||
eth_mask->dst.addr_bytes[j] != 0xFF) {
memset(rule, 0,
return -rte_errno;
}
} else {
- if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
+ if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
+ item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
return -rte_errno;
}
rule->b_mask = TRUE;
- ipv4_mask =
- (const struct rte_flow_item_ipv4 *)item->mask;
+ ipv4_mask = item->mask;
if (ipv4_mask->hdr.version_ihl ||
ipv4_mask->hdr.type_of_service ||
ipv4_mask->hdr.total_length ||
if (item->spec) {
rule->b_spec = TRUE;
- ipv4_spec =
- (const struct rte_flow_item_ipv4 *)item->spec;
+ ipv4_spec = item->spec;
rule->ixgbe_fdir.formatted.dst_ip[0] =
ipv4_spec->hdr.dst_addr;
rule->ixgbe_fdir.formatted.src_ip[0] =
}
rule->b_mask = TRUE;
- ipv6_mask =
- (const struct rte_flow_item_ipv6 *)item->mask;
+ ipv6_mask = item->mask;
if (ipv6_mask->hdr.vtc_flow ||
ipv6_mask->hdr.payload_len ||
ipv6_mask->hdr.proto ||
if (item->spec) {
rule->b_spec = TRUE;
- ipv6_spec =
- (const struct rte_flow_item_ipv6 *)item->spec;
+ ipv6_spec = item->spec;
rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
ipv6_spec->hdr.src_addr, 16);
rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
return -rte_errno;
}
rule->b_mask = TRUE;
- tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
+ tcp_mask = item->mask;
if (tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
if (item->spec) {
rule->b_spec = TRUE;
- tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
+ tcp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
tcp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
return -rte_errno;
}
rule->b_mask = TRUE;
- udp_mask = (const struct rte_flow_item_udp *)item->mask;
+ udp_mask = item->mask;
if (udp_mask->hdr.dgram_len ||
udp_mask->hdr.dgram_cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
if (item->spec) {
rule->b_spec = TRUE;
- udp_spec = (const struct rte_flow_item_udp *)item->spec;
+ udp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
udp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
return -rte_errno;
}
rule->b_mask = TRUE;
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask->hdr.tag ||
sctp_mask->hdr.cksum) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
if (item->spec) {
rule->b_spec = TRUE;
- sctp_spec =
- (const struct rte_flow_item_sctp *)item->spec;
+ sctp_spec = item->spec;
rule->ixgbe_fdir.formatted.src_port =
sctp_spec->hdr.src_port;
rule->ixgbe_fdir.formatted.dst_port =
}
/* others even sctp port is not supported */
} else {
- sctp_mask =
- (const struct rte_flow_item_sctp *)item->mask;
+ sctp_mask = item->mask;
if (sctp_mask &&
(sctp_mask->hdr.src_port ||
sctp_mask->hdr.dst_port ||
return -rte_errno;
}
- raw_mask = (const struct rte_flow_item_raw *)item->mask;
+ raw_mask = item->mask;
/* check mask */
if (raw_mask->relative != 0x1 ||
return -rte_errno;
}
- raw_spec = (const struct rte_flow_item_raw *)item->spec;
+ raw_spec = item->spec;
/* check spec */
if (raw_spec->relative != 0 ||
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_VXLAN;
+ IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
/* Only care about VNI, others should be masked. */
if (!item->mask) {
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- vxlan_mask =
- (const struct rte_flow_item_vxlan *)item->mask;
+ vxlan_mask = item->mask;
if (vxlan_mask->flags) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
if (item->spec) {
rule->b_spec = TRUE;
- vxlan_spec = (const struct rte_flow_item_vxlan *)
- item->spec;
+ vxlan_spec = item->spec;
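+			/* The VNI is copied as-is and stays in network byte order. */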
rte_memcpy(((uint8_t *)
- &rule->ixgbe_fdir.formatted.tni_vni + 1),
+ &rule->ixgbe_fdir.formatted.tni_vni),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
- rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
- rule->ixgbe_fdir.formatted.tni_vni);
}
}
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
- RTE_FDIR_TUNNEL_TYPE_NVGRE;
+ IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
/**
* Only care about flags0, flags1, protocol and TNI,
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
- nvgre_mask =
- (const struct rte_flow_item_nvgre *)item->mask;
+ nvgre_mask = item->mask;
if (nvgre_mask->flow_id) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
if (item->spec) {
rule->b_spec = TRUE;
- nvgre_spec =
- (const struct rte_flow_item_nvgre *)item->spec;
+ nvgre_spec = item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x2000) &&
nvgre_mask->c_k_s_rsvd0_ver) {
/* tni is a 24-bits bit field */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
- rule->ixgbe_fdir.formatted.tni_vni <<= 8;
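+			/* The TNI likewise stays in network byte order. */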
}
}
return -rte_errno;
}
rule->b_mask = TRUE;
- eth_mask = (const struct rte_flow_item_eth *)item->mask;
+ eth_mask = item->mask;
/* Ether type should be masked. */
if (eth_mask->type) {
}
/* src MAC address should be masked. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
}
}
rule->mask.mac_addr_byte_mask = 0;
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
/* It's a per byte mask. */
if (eth_mask->dst.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
if (item->spec) {
rule->b_spec = TRUE;
- eth_spec = (const struct rte_flow_item_eth *)item->spec;
+ eth_spec = item->spec;
/* Get the dst MAC. */
- for (j = 0; j < ETHER_ADDR_LEN; j++) {
+ for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
}
return -rte_errno;
}
- vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
- vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
+ vlan_spec = item->spec;
+ vlan_mask = item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
return ret;
}
+static int
+ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action actions[],
+ struct ixgbe_rte_flow_rss_conf *rss_conf,
+ struct rte_flow_error *error)
+{
+ const struct rte_flow_action *act;
+ const struct rte_flow_action_rss *rss;
+ uint16_t n;
+
+ /**
+ * rss only supports forwarding,
+ * check if the first not void action is RSS.
+ */
+ act = next_no_void_action(actions, NULL);
+ if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ rss = (const struct rte_flow_action_rss *)act->conf;
+
+ if (!rss || !rss->queue_num) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "no valid queues");
+ return -rte_errno;
+ }
+
+ for (n = 0; n < rss->queue_num; n++) {
+ if (rss->queue[n] >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act,
+ "queue id > max number of queues");
+ return -rte_errno;
+ }
+ }
+
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
+ if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS hash key must be exactly 40 bytes");
+ if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "too many queues for RSS context");
+ if (ixgbe_rss_conf_init(rss_conf, rss))
+ return rte_flow_error_set
+ (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "RSS context initialization failure");
+
+ /* check if the next not void item is END */
+ act = next_no_void_action(actions, act);
+ if (act->type != RTE_FLOW_ACTION_TYPE_END) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ act, "Not supported action.");
+ return -rte_errno;
+ }
+
+ /* parse attr */
+ /* must be input direction */
+ if (!attr->ingress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
+ attr, "Only support ingress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->egress) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
+ attr, "Not support egress.");
+ return -rte_errno;
+ }
+
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
+ if (attr->priority > 0xFFFF) {
+ memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Error priority.");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
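+/*
+ * A minimal sketch of an RSS action this parser accepts (assuming the
+ * queue ids are below the device's Rx queue count): default hash
+ * function, level 0, no key override.
+ *
+ *	uint16_t queues[2] = { 0, 1 };
+ *	struct rte_flow_action_rss rss = {
+ *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
+ *		.level = 0,
+ *		.types = RTE_ETH_RSS_IP,
+ *		.queue_num = 2,
+ *		.queue = queues,
+ *	};
+ *	struct rte_flow_action actions[] = {
+ *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
+ *		{ .type = RTE_FLOW_ACTION_TYPE_END },
+ *	};
+ */
+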
+/* remove the rss filter */
+static void
+ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
+{
+ struct ixgbe_filter_info *filter_info =
+ IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
+
+ if (filter_info->rss_info.conf.queue_num)
+ ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
+}
+
void
ixgbe_filterlist_init(void)
{
TAILQ_INIT(&filter_syn_list);
TAILQ_INIT(&filter_fdir_list);
TAILQ_INIT(&filter_l2_tunnel_list);
+ TAILQ_INIT(&filter_rss_list);
TAILQ_INIT(&ixgbe_flow_list);
}
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
TAILQ_REMOVE(&filter_ntuple_list,
rte_free(fdir_rule_ptr);
}
+ while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr,
+ entries);
+ rte_free(rss_filter_ptr);
+ }
+
while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr,
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_l2_tunnel_conf l2_tn_filter;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
uint8_t first_mask = FALSE;
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
-#ifdef RTE_LIBRTE_SECURITY
+#ifdef RTE_LIB_SECURITY
/* ESP flow not really a flow*/
if (ntuple_filter.proto == IPPROTO_ESP)
return flow;
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
- fdir_info->flex_bytes_offset =
- fdir_rule.flex_bytes_offset;
- if (fdir_rule.mask.flex_bytes_mask)
- ixgbe_fdir_set_flexbytes_offset(dev,
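+		/* Program the flex byte offset only when flex bytes are used. */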
+ if (fdir_rule.mask.flex_bytes_mask) {
+ ret = ixgbe_fdir_set_flexbytes_offset(dev,
fdir_rule.flex_bytes_offset);
-
+ if (ret)
+ goto out;
+ }
ret = ixgbe_fdir_set_input_mask(dev);
if (ret)
goto out;
if (ret)
goto out;
- if (fdir_info->flex_bytes_offset !=
- fdir_rule.flex_bytes_offset)
+ if (fdir_rule.mask.flex_bytes_mask &&
+ fdir_info->flex_bytes_offset !=
+ fdir_rule.flex_bytes_offset)
goto out;
}
}
goto out;
}
- memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
if (!ret) {
}
rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
- sizeof(struct rte_eth_l2_tunnel_conf));
+ sizeof(struct ixgbe_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
}
}
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
+ if (!ret) {
+ ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
+ if (!ret) {
+ rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
+ sizeof(struct ixgbe_rss_conf_ele), 0);
+ if (!rss_filter_ptr) {
+ PMD_DRV_LOG(ERR, "failed to allocate memory");
+ goto out;
+ }
+ ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
+ &rss_conf.conf);
+ TAILQ_INSERT_TAIL(&filter_rss_list,
+ rss_filter_ptr, entries);
+ flow->rule = rss_filter_ptr;
+ flow->filter_type = RTE_ETH_FILTER_HASH;
+ return flow;
+ }
+ }
+
out:
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
/**
* Check if the flow rule is supported by ixgbe.
- * It only checkes the format. Don't guarantee the rule can be programmed into
- * the HW. Because there can be no enough room for the rule.
+ * It only checks the format. It doesn't guarantee that the rule can be
+ * programmed into the HW, because there may not be enough room for it.
*/
static int
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_l2_tunnel_conf l2_tn_filter;
struct ixgbe_fdir_rule fdir_rule;
+ struct ixgbe_rte_flow_rss_conf rss_conf;
int ret;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
if (!ret)
return 0;
- memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
+ memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
+ if (!ret)
+ return 0;
+
+ memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
+ ret = ixgbe_parse_rss_filter(dev, attr,
+ actions, &rss_conf, error);
return ret;
}
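/*
 * Typical application-side call sequence (a sketch; port_id, attr,
 * pattern and actions are assumed to be set up by the caller):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */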
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
- struct rte_eth_l2_tunnel_conf l2_tn_filter;
+ struct ixgbe_l2_tunnel_conf l2_tn_filter;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
+ struct ixgbe_rss_conf_ele *rss_filter_ptr;
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
- sizeof(struct rte_eth_l2_tunnel_conf));
+ sizeof(struct ixgbe_l2_tunnel_conf));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
if (!ret) {
TAILQ_REMOVE(&filter_l2_tunnel_list,
rte_free(l2_tn_filter_ptr);
}
break;
+ case RTE_ETH_FILTER_HASH:
+ rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
+ pmd_flow->rule;
+ ret = ixgbe_config_rss_filter(dev,
+ &rss_filter_ptr->filter_info, FALSE);
+ if (!ret) {
+ TAILQ_REMOVE(&filter_rss_list,
+ rss_filter_ptr, entries);
+ rte_free(rss_filter_ptr);
+ }
+ break;
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
rte_free(ixgbe_flow_mem_ptr);
+ break;
}
}
rte_free(flow);
return ret;
}
+ ixgbe_clear_rss_filter(dev);
+
ixgbe_filterlist_flush();
return 0;