#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
+#include <rte_tailq.h>
#include "ice_ethdev.h"
#include "ice_generic_flow.h"
if (!(ipv6_spec && ipv6_mask))
break;
- if (ipv6_mask->hdr.payload_len ||
- ipv6_mask->hdr.vtc_flow) {
+ if (ipv6_mask->hdr.payload_len) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
input_set |= ICE_INSET_IPV6_PROTO;
if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
input_set |= ICE_INSET_IPV6_HOP_LIMIT;
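+ /* vtc_flow packs version, traffic class and flow label; match
+  * on TOS only when the whole traffic-class field is masked.
+  */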
+ if ((ipv6_mask->hdr.vtc_flow &
+ rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK)) ==
+ rte_cpu_to_be_32(RTE_IPV6_HDR_TC_MASK))
+ input_set |= ICE_INSET_IPV6_TOS;
}
break;
"Invalid VXLAN item");
return 0;
}
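+ /* The VXLAN VNI is 24 bits wide; match the tunnel ID only
+  * when all three VNI bytes are fully masked.
+  */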
+ if (vxlan_mask && vxlan_mask->vni[0] == UINT8_MAX &&
+ vxlan_mask->vni[1] == UINT8_MAX &&
+ vxlan_mask->vni[2] == UINT8_MAX)
+ input_set |= ICE_INSET_TUN_ID;
is_tunnel = 1;
break;
case RTE_FLOW_ITEM_TYPE_NVGRE:
nvgre_spec = item->spec;
nvgre_mask = item->mask;
- /* Check if VXLAN item is used to describe protocol.
+ /* Check if NVGRE item is used to describe protocol.
* If yes, both spec and mask should be NULL.
* If no, both spec and mask shouldn't be NULL.
*/
"Invalid NVGRE item");
return 0;
}
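+ /* As with VXLAN, a fully masked 24-bit TNI selects matching
+  * on the tunnel ID.
+  */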
+ if (nvgre_mask && nvgre_mask->tni[0] == UINT8_MAX &&
+ nvgre_mask->tni[1] == UINT8_MAX &&
+ nvgre_mask->tni[2] == UINT8_MAX)
+ input_set |= ICE_INSET_TUN_ID;
is_tunnel = 1;
+ break;
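+ /* VOID items carry no match data and are skipped. */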
+ case RTE_FLOW_ITEM_TYPE_VOID:
break;
default:
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item,
- "Invalid mask no exist");
+ "Invalid pattern");
break;
}
}
{
const struct rte_flow_action_queue *act_q;
uint16_t queue;
-
- switch (actions->type) {
- case RTE_FLOW_ACTION_TYPE_QUEUE:
- act_q = actions->conf;
- queue = act_q->index;
- if (queue >= dev->data->nb_rx_queues) {
+ const struct rte_flow_action *action;
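+ /* Walk the whole action list up to the END sentinel instead of
+  * checking only the first entry.
+  */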
+ for (action = actions; action->type !=
+ RTE_FLOW_ACTION_TYPE_END; action++) {
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ act_q = action->conf;
+ queue = act_q->index;
+ if (queue >= dev->data->nb_rx_queues) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ actions, "Invalid queue ID for"
+ " switch filter.");
+ return -rte_errno;
+ }
+ break;
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ case RTE_FLOW_ACTION_TYPE_VOID:
+ break;
+ default:
rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION,
- actions, "Invalid queue ID for"
- " switch filter.");
+ RTE_FLOW_ERROR_TYPE_ACTION, action,
+ "Invalid action.");
return -rte_errno;
}
- break;
- case RTE_FLOW_ACTION_TYPE_DROP:
- break;
- default:
- rte_flow_error_set(error, EINVAL,
- RTE_FLOW_ERROR_TYPE_ACTION, actions,
- "Invalid action.");
- return -rte_errno;
}
-
return 0;
}
{
struct ice_pf *pf = ICE_DEV_PRIVATE_TO_PF(dev->data->dev_private);
struct rte_flow *p_flow;
+ void *temp;
int ret = 0;
- TAILQ_FOREACH(p_flow, &pf->flow_list, node) {
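+ /* ice_flow_destroy() unlinks the current entry, so use the
+  * removal-safe iterator with a scratch pointer.
+  */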
+ TAILQ_FOREACH_SAFE(p_flow, &pf->flow_list, node, temp) {
ret = ice_flow_destroy(dev, p_flow, error);
if (ret) {
rte_flow_error_set(error, -ret,