#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
-#include <rte_ethdev_pci.h>
+#include <ethdev_driver.h>
+#include <ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#define IGB_FLEX_RAW_NUM 12
+struct igb_flow_mem_list igb_flow_list;
+struct igb_ntuple_filter_list igb_filter_ntuple_list;
+struct igb_ethertype_filter_list igb_filter_ethertype_list;
+struct igb_syn_filter_list igb_filter_syn_list;
+struct igb_flex_filter_list igb_filter_flex_list;
+struct igb_rss_filter_list igb_filter_rss_list;
+
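/*
 * Illustrative sketch, not part of the patch: the lists above are plain
 * TAILQ heads (their types are presumably defined via TAILQ_HEAD() in the
 * driver header, macros come from <sys/queue.h>), so they need a one-time
 * TAILQ_INIT() during device initialization before the first flow is
 * created. The helper name below is hypothetical.
 */
static void
igb_flow_lists_init_example(void)
{
	TAILQ_INIT(&igb_flow_list);
	TAILQ_INIT(&igb_filter_ntuple_list);
	TAILQ_INIT(&igb_filter_ethertype_list);
	TAILQ_INIT(&igb_filter_syn_list);
	TAILQ_INIT(&igb_filter_flex_list);
	TAILQ_INIT(&igb_filter_rss_list);
}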
/**
- * Please aware there's an asumption for all the parsers.
+ * Please be aware there's an assumption for all the parsers.
* rte_flow_item is using big endian, rte_flow_attr and
* rte_flow_action are using CPU order.
* Because the pattern is used to describe the packets,
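/*
 * Illustrative sketch of the byte-order convention described above; the
 * helper name is hypothetical and not part of the patch. Values written
 * into a pattern item (here a TCP destination port) must be big endian,
 * while attribute and action fields stay in CPU order.
 */
static void
igb_example_fill_tcp_rule(struct rte_flow_item_tcp *spec,
			  struct rte_flow_item_tcp *mask,
			  struct rte_flow_action_queue *queue,
			  uint16_t dst_port, uint16_t queue_id)
{
	spec->hdr.dst_port = rte_cpu_to_be_16(dst_port);	/* big endian */
	mask->hdr.dst_port = RTE_BE16(0xffff);			/* big endian */
	queue->index = queue_id;				/* CPU order */
}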
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
- item, "Not supported action.");
+ act, "Not supported action.");
return -rte_errno;
}
filter->queue =
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
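/*
 * Illustrative sketch, not part of the patch: the only attribute layout
 * accepted by these parsers is plain ingress, with egress and transfer
 * rejected and a priority that fits the checks around it. The variable
 * name is hypothetical.
 */
static const struct rte_flow_attr igb_example_attr = {
	.group = 0,
	.priority = 0,
	.ingress = 1,
	.egress = 0,
	.transfer = 0,
};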
if (attr->priority > 0xFFFF) {
memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
rte_flow_error_set(error, EINVAL,
* Mask bits of destination MAC address must be full
* of 1 or full of 0.
*/
- if (!is_zero_ether_addr(ð_mask->src) ||
- (!is_zero_ether_addr(ð_mask->dst) &&
- !is_broadcast_ether_addr(ð_mask->dst))) {
+ if (!rte_is_zero_ether_addr(ð_mask->src) ||
+ (!rte_is_zero_ether_addr(ð_mask->dst) &&
+ !rte_is_broadcast_ether_addr(ð_mask->dst))) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Invalid ether address mask");
/* If mask bits of destination MAC address
* are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
*/
- if (is_broadcast_ether_addr(ð_mask->dst)) {
+ if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
filter->mac_addr = eth_spec->dst;
filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
} else {
return -rte_errno;
}
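/*
 * Illustrative sketch, not part of the patch: an ETH spec/mask pair that
 * satisfies the check above. The source MAC is fully wildcarded and the
 * destination MAC mask is all ones, so the filter also matches on the
 * destination address. Variable names and the ARP ether type are only
 * examples.
 */
static const struct rte_flow_item_eth igb_example_eth_spec = {
	.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	.type = RTE_BE16(RTE_ETHER_TYPE_ARP),
};
static const struct rte_flow_item_eth igb_example_eth_mask = {
	.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	.type = RTE_BE16(0xFFFF),
};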
+ /* Not supported */
+ if (attr->transfer) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Not supported */
if (attr->priority) {
rte_flow_error_set(error, EINVAL,
}
}
- if (filter->ether_type == ETHER_TYPE_IPv4 ||
- filter->ether_type == ETHER_TYPE_IPv6) {
+ if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
+ filter->ether_type == RTE_ETHER_TYPE_IPV6) {
memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
tcp_spec = item->spec;
tcp_mask = item->mask;
- if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
+ if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
tcp_mask->hdr.src_port ||
tcp_mask->hdr.dst_port ||
tcp_mask->hdr.sent_seq ||
tcp_mask->hdr.recv_ack ||
tcp_mask->hdr.data_off ||
- tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
+ tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
tcp_mask->hdr.rx_win ||
tcp_mask->hdr.cksum ||
tcp_mask->hdr.tcp_urp) {
return -rte_errno;
}
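/*
 * Illustrative sketch, not part of the patch: a TCP spec/mask pair that
 * passes the SYN filter check above. Only the SYN flag is significant;
 * every other TCP header field must be left unmasked (zero in the mask).
 * Variable names are hypothetical.
 */
static const struct rte_flow_item_tcp igb_example_syn_spec = {
	.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
};
static const struct rte_flow_item_tcp igb_example_syn_mask = {
	.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
};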
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct rte_eth_syn_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
/* Support 2 priorities, the lowest or highest. */
if (!attr->priority) {
filter->hig_pri = 0;
cons_parse_flex_filter(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_flex_filter *filter,
+ struct igb_flex_filter *filter,
struct rte_flow_error *error)
{
const struct rte_flow_item *item;
if (!raw_mask->length ||
!raw_mask->relative) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
for (j = 0; j < raw_spec->length; j++) {
if (raw_mask->pattern[j] != 0xFF) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
}
if ((raw_spec->length + offset + total_offset) >
- RTE_FLEX_FILTER_MAXLEN) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ IGB_FLEX_FILTER_MAXLEN) {
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by flex filter");
/* check if the first not void action is QUEUE. */
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
/* parse attr */
/* must be input direction */
if (!attr->ingress) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
attr, "Only support ingress.");
/* not supported */
if (attr->egress) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
attr, "Not support egress.");
return -rte_errno;
}
+ /* not supported */
+ if (attr->transfer) {
+ memset(filter, 0, sizeof(struct igb_flex_filter));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
attr, "Error priority.");
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
- struct rte_eth_flex_filter *filter,
+ struct igb_flex_filter *filter,
struct rte_flow_error *error)
{
struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
actions, filter, error);
if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
- memset(filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(filter, 0, sizeof(struct igb_flex_filter));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
NULL, "queue number not supported by flex filter");
struct igb_rte_flow_rss_conf *rss_conf,
struct rte_flow_error *error)
{
+ struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
const struct rte_flow_action *act;
const struct rte_flow_action_rss *rss;
uint16_t n, index;
}
}
+ if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "non-default RSS hash functions are not supported");
+ if (rss->level)
+ return rte_flow_error_set
+ (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
+ "a nonzero RSS encapsulation level is not supported");
if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS hash key must be exactly 40 bytes");
- if (rss->queue_num > RTE_DIM(rss_conf->queue))
+ if (((hw->mac.type == e1000_82576) &&
+ (rss->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
+ ((hw->mac.type != e1000_82576) &&
+ (rss->queue_num > IGB_MAX_RX_QUEUE_NUM)))
return rte_flow_error_set
(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
"too many queues for RSS context");
- if (igb_rss_conf_init(rss_conf, rss))
+ if (igb_rss_conf_init(dev, rss_conf, rss))
return rte_flow_error_set
(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
"RSS context initialization failure");
index++;
NEXT_ITEM_OF_ACTION(act, actions, index);
if (act->type != RTE_FLOW_ACTION_TYPE_END) {
- memset(rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION,
act, "Not supported action.");
return -rte_errno;
}
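/*
 * Illustrative sketch, not part of the patch: an RSS action that passes
 * the checks added above. It uses the default hash function, no
 * encapsulation level, a 40-byte key, and a queue count within the
 * per-MAC limit. Variable names are hypothetical.
 */
static const uint16_t igb_example_rss_queues[2] = { 0, 1 };
static const uint8_t igb_example_rss_key[40] = { 0 };
static const struct rte_flow_action_rss igb_example_rss = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,
	.types = ETH_RSS_IPV4,
	.key_len = 40,
	.queue_num = 2,
	.key = igb_example_rss_key,
	.queue = igb_example_rss_queues,
};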
+ /* not supported */
+ if (attr->transfer) {
+ memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
+ attr, "No support for transfer.");
+ return -rte_errno;
+ }
+
if (attr->priority > 0xFFFF) {
memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
rte_flow_error_set(error, EINVAL,
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
- struct rte_eth_flex_filter flex_filter;
+ struct igb_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
struct rte_flow *flow = NULL;
struct igb_ntuple_filter_ele *ntuple_filter_ptr;
goto out;
}
- memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(&flex_filter, 0, sizeof(struct igb_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
if (!ret) {
rte_memcpy(&flex_filter_ptr->filter_info,
&flex_filter,
- sizeof(struct rte_eth_flex_filter));
+ sizeof(struct igb_flex_filter));
TAILQ_INSERT_TAIL(&igb_filter_flex_list,
flex_filter_ptr, entries);
flow->rule = flex_filter_ptr;
PMD_DRV_LOG(ERR, "failed to allocate memory");
goto out;
}
- igb_rss_conf_init(&rss_filter_ptr->filter_info,
+ igb_rss_conf_init(dev, &rss_filter_ptr->filter_info,
&rss_conf.conf);
TAILQ_INSERT_TAIL(&igb_filter_rss_list,
rss_filter_ptr, entries);
/**
* Check if the flow rule is supported by igb.
- * It only checkes the format. Don't guarantee the rule can be programmed into
+ * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for the rule.
*/
static int
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
- struct rte_eth_flex_filter flex_filter;
+ struct igb_flex_filter flex_filter;
struct igb_rte_flow_rss_conf rss_conf;
int ret;
if (!ret)
return 0;
- memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
+ memset(&flex_filter, 0, sizeof(struct igb_flex_filter));
ret = igb_parse_flex_filter(dev, attr, pattern,
actions, &flex_filter, error);
if (!ret)