#include <rte_malloc.h>
#include <rte_ip.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_prm.h>
+
#include "mlx5_defs.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
-#include "mlx5_glue.h"
-#include "mlx5_prm.h"
#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
+ if (flow->counter && flow->counter->cs) {
struct rte_flow_query_count *qc = data;
uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
const struct rte_flow_action_queue *queue = action->conf;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
+ if (flow->rss.queue)
+ (*flow->rss.queue)[0] = queue->index;
flow->rss.queue_num = 1;
}
const uint8_t *rss_key;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
+ if (flow->rss.queue)
+ memcpy((*flow->rss.queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
- flow->rss.level = rss->level;
+ memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+	 * rss->level and rss->types should be set in advance when expanding
+ * items for RSS.
+ */
}
/**
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
+ uint16_t ether_type = 0;
if (items == NULL)
return -1;
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_eth *)
+ items->spec)->type;
+ ether_type &=
+ ((const struct rte_flow_item_eth *)
+ items->mask)->type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
MLX5_FLOW_LAYER_INNER_VLAN) :
(MLX5_FLOW_LAYER_OUTER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
"action not supported");
}
}
+ /*
+ * Validate the drop action mutual exclusion with other actions.
+ * Drop action is mutually-exclusive with any other action, except for
+ * Count action.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Drop action is mutually-exclusive "
+ "with any other action, except for "
+ "Count action");
if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
- struct mlx5_flow *flow;
+ size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ struct mlx5_flow *dev_flow;
size += flow_verbs_get_actions_size(actions);
size += flow_verbs_get_items_size(items);
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
+ dev_flow = rte_calloc(__func__, 1, size, 0);
+ if (!dev_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not enough memory to create flow");
return NULL;
}
- flow->verbs.attr = (void *)(flow + 1);
- flow->verbs.specs =
- (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
- return flow;
+ dev_flow->verbs.attr = (void *)(dev_flow + 1);
+ dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+ dev_flow->ingress = attr->ingress;
+ dev_flow->transfer = attr->transfer;
+ return dev_flow;
}
/**
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
"action not supported");
}
}
- flow->actions = action_flags;
+ dev_flow->actions = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
flow_verbs_translate_item_ipv4(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
flow_verbs_translate_item_ipv6(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
flow_verbs_translate_item_tcp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_TCP,
(IBV_RX_HASH_SRC_PORT_TCP |
flow_verbs_translate_item_udp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_UDP,
(IBV_RX_HASH_SRC_PORT_UDP |
verbs->flow = NULL;
}
if (verbs->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, verbs->hrxq);
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
verbs = &dev_flow->verbs;
- if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
verbs->hrxq = mlx5_hrxq_drop_new(dev);
if (!verbs->hrxq) {
rte_flow_error_set
} else {
struct mlx5_hrxq *hrxq;
- hrxq = mlx5_hrxq_get(dev, flow->key,
+ assert(flow->rss.queue);
+ hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num);
if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
+ hrxq = mlx5_hrxq_new(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num,
!!(dev_flow->layers &
MLX5_FLOW_LAYER_TUNNEL));
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
verbs = &dev_flow->verbs;
if (verbs->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, verbs->hrxq);