#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
/** True if it's OK for this item to be the first item. For some NIC
* versions, it's invalid to start the stack above layer 3.
*/
- const u8 valid_start_item;
+ const uint8_t valid_start_item;
/* Inner packet version of copy_item. */
enic_copy_item_fn *inner_copy_item;
};
};
static int
-mask_exact_match(const u8 *supported, const u8 *supplied,
+mask_exact_match(const uint8_t *supported, const uint8_t *supplied,
unsigned int size)
{
unsigned int i;
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "IPv4 exact match mask");
return ENOTSUP;
}
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "UDP exact match mask");
return ENOTSUP;
}
}
/* check that the supplied mask exactly matches capability */
- if (!mask_exact_match((const u8 *)&supported_mask,
- (const u8 *)item->mask, sizeof(*mask))) {
+ if (!mask_exact_match((const uint8_t *)&supported_mask,
+ (const uint8_t *)item->mask, sizeof(*mask))) {
ENICPMD_LOG(ERR, "TCP exact match mask");
return ENOTSUP;
}
*/
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
- const struct enic_items *item_info, u8 is_first_item)
+ const struct enic_items *item_info, uint8_t is_first_item)
{
enum rte_flow_item_type const *allowed_items = item_info->prev_items;
{
int ret;
const struct rte_flow_item *item = pattern;
- u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
+ uint8_t inner_ofst = 0; /* If encapsulated, ofst into L5 */
enum rte_flow_item_type prev_item;
const struct enic_items *item_info;
struct copy_item_args args;
enic_copy_item_fn *copy_fn;
- u8 is_first_item = 1;
+ uint8_t is_first_item = 1;
ENICPMD_FUNC_TRACE();
const struct rte_flow_action_mark *mark =
(const struct rte_flow_action_mark *)
actions->conf;
-
+ if (enic->use_noscatter_vec_rx_handler)
+ return ENOTSUP;
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
break;
}
case RTE_FLOW_ACTION_TYPE_FLAG: {
+ if (enic->use_noscatter_vec_rx_handler)
+ return ENOTSUP;
if (overlap & MARK)
return ENOTSUP;
overlap |= MARK;
if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
sprintf(ip6, "%s ",
- (gp->val_flags & FILTER_GENERIC_1_IPV4)
+ (gp->val_flags & FILTER_GENERIC_1_IPV6)
? "ip6(y)" : "ip6(n)");
else
sprintf(ip6, "%s ", "ip6(x)");
return -rte_errno;
}
enic_filter->type = enic->flow_filter_mode;
+ if (enic->adv_filters)
+ enic_filter->type = FILTER_DPDK_1;
ret = enic_copy_filter(pattern, enic_filter_cap, enic,
enic_filter, error);
return ret;
{
struct rte_flow *flow;
int err;
- u16 entry;
+ uint16_t entry;
ENICPMD_FUNC_TRACE();
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
struct rte_flow_error *error)
{
- u16 filter_id;
+ uint16_t filter_id;
int err;
ENICPMD_FUNC_TRACE();