#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
-#include "mlx5.h"
+#include <mlx5_glue.h>
+#include <mlx5_prm.h>
+
#include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
+#include "mlx5.h"
#include "mlx5_flow.h"
+#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
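/*
 * Illustrative sketch, not part of the patch: VERBS_SPEC_INNER() normalizes
 * the tunnel bit in item_flags with a double negation and yields either
 * IBV_FLOW_SPEC_INNER or 0, which callers OR into a spec type, e.g.:
 *
 *	struct ibv_flow_spec_eth eth = {
 *		.type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
 *		.size = sizeof(eth),
 *	};
 */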
int ret;
if (shared) {
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
if (cnt->shared && cnt->id == id) {
cnt->ref_cnt++;
return cnt;
/* Create counter with Verbs. */
ret = flow_verbs_counter_create(dev, cnt);
if (!ret) {
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
return cnt;
}
/* Some error occurred in Verbs library. */
/**
* Release a flow counter.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] counter
* Pointer to the counter handler.
*/
static void
-flow_verbs_counter_release(struct mlx5_flow_counter *counter)
+flow_verbs_counter_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
if (--counter->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
claim_zero(mlx5_glue->destroy_counters(counter->cs));
#endif
- LIST_REMOVE(counter, next);
+ TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
rte_free(counter);
}
}
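/*
 * Illustrative sketch of the counter acquire/release pattern above, reduced
 * to a standalone example. counter_get()/counter_put() and the trimmed
 * struct are assumed names, not the driver's types.
 */
#include <sys/queue.h>
#include <stdint.h>
#include <stdlib.h>

struct counter {
	TAILQ_ENTRY(counter) next;
	uint32_t id;
	uint32_t shared:1;
	uint32_t ref_cnt;
};
TAILQ_HEAD(counter_list, counter);

static struct counter *
counter_get(struct counter_list *list, uint32_t id)
{
	struct counter *cnt;

	TAILQ_FOREACH(cnt, list, next) {
		if (cnt->shared && cnt->id == id) {
			cnt->ref_cnt++; /* Reuse the shared entry. */
			return cnt;
		}
	}
	cnt = calloc(1, sizeof(*cnt));
	if (!cnt)
		return NULL; /* Caller maps this to ENOMEM. */
	cnt->id = id;
	cnt->shared = 1;
	cnt->ref_cnt = 1;
	TAILQ_INSERT_HEAD(list, cnt, next); /* New entries go to the head. */
	return cnt;
}

static void
counter_put(struct counter_list *list, struct counter *cnt)
{
	if (--cnt->ref_cnt)
		return; /* Other flows still hold a reference. */
	TAILQ_REMOVE(list, cnt, next);
	free(cnt);
}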
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
- if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
+ if (flow->counter && flow->counter->cs) {
struct rte_flow_query_count *qc = data;
uint64_t counters[2] = {0, 0};
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
if (spec) {
unsigned int i;
- memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
- memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.mask.ether_type = mask->type;
/* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
eth.val.src_mac[i] &= eth.mask.src_mac[i];
}
flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
else
flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
+ if (!tunnel)
+ dev_flow->verbs.vf_vlan.tag =
+ rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
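/*
 * Illustrative sketch, not part of the patch: the TCI is big-endian on the
 * wire and packs PCP(3) | DEI(1) | VID(12), so masking the CPU-order value
 * with 0x0fff keeps only the VLAN ID, e.g. TCI 0x2064 -> PCP 1, DEI 0,
 * VID 100 (0x064).
 */
#include <stdint.h>
#include <rte_byteorder.h>

static inline uint16_t
vlan_id_from_tci(rte_be16_t tci)
{
	return rte_be_to_cpu_16(tci) & 0x0fff; /* Drop the PCP/DEI bits. */
}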
/**
vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.val.next_hdr = spec->hdr.proto;
ipv6.val.hop_limit = spec->hdr.hop_limits;
ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.mask.next_hdr = mask->hdr.proto;
ipv6.mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
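/*
 * Illustrative sketch: per the rte_ip.h definitions, vtc_flow packs
 * version(4) | traffic class(8) | flow label(20), so with
 * RTE_IPV6_HDR_TC_SHIFT = 20 a value of 0x6ab12345 splits into traffic
 * class 0xab and flow label 0x12345.
 */
#include <stdint.h>
#include <rte_ip.h>

static inline void
ipv6_vtc_split(uint32_t vtc_flow_cpu, uint8_t *tc, uint32_t *fl)
{
	*tc = (vtc_flow_cpu & RTE_IPV6_HDR_TC_MASK) >> RTE_IPV6_HDR_TC_SHIFT;
	*fl = (vtc_flow_cpu & RTE_IPV6_HDR_FL_MASK) >> RTE_IPV6_HDR_FL_SHIFT;
}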
const struct rte_flow_action_queue *queue = action->conf;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- (*flow->queue)[0] = queue->index;
+ if (flow->rss.queue)
+ (*flow->rss.queue)[0] = queue->index;
flow->rss.queue_num = 1;
}
const uint8_t *rss_key;
struct rte_flow *flow = dev_flow->flow;
- if (flow->queue)
- memcpy((*flow->queue), rss->queue,
+ if (flow->rss.queue)
+ memcpy((*flow->rss.queue), rss->queue,
rss->queue_num * sizeof(uint16_t));
flow->rss.queue_num = rss->queue_num;
/* NULL RSS key indicates default RSS key. */
rss_key = !rss->key ? rss_hash_default_key : rss->key;
- memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
- /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
- flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
- flow->rss.level = rss->level;
+ memcpy(flow->rss.key, rss_key, MLX5_RSS_HASH_KEY_LEN);
+ /*
+ * rss->level and rss->types should be set in advance when expanding
+ * items for RSS.
+ */
}
/**
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] external
+ * Whether this flow rule is created by a request external to the PMD.
* @param[out] error
* Pointer to the error structure.
*
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
+ bool external __rte_unused,
struct rte_flow_error *error)
{
int ret;
uint64_t item_flags = 0;
uint64_t last_item = 0;
uint8_t next_protocol = 0xff;
+ uint16_t ether_type = 0;
if (items == NULL)
return -1;
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
MLX5_FLOW_LAYER_OUTER_L2;
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_eth *)
+ items->spec)->type;
+ ether_type &=
+ ((const struct rte_flow_item_eth *)
+ items->mask)->type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
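/*
 * Illustrative note, not part of the patch: ether_type ends up as the spec
 * value ANDed with the mask, so wildcarded bits cannot constrain the later
 * L3 validation, e.g. spec->type = RTE_BE16(0x0800) with
 * mask->type = RTE_BE16(0xffff) gives ether_type 0x0800 (IPv4 expected
 * next), while a zero mask gives ether_type 0 (no constraint).
 */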
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
- error);
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
MLX5_FLOW_LAYER_INNER_VLAN) :
(MLX5_FLOW_LAYER_OUTER_L2 |
MLX5_FLOW_LAYER_OUTER_VLAN);
+ if (items->mask != NULL && items->spec != NULL) {
+ ether_type =
+ ((const struct rte_flow_item_vlan *)
+ items->spec)->inner_type;
+ ether_type &=
+ ((const struct rte_flow_item_vlan *)
+ items->mask)->inner_type;
+ ether_type = rte_be_to_cpu_16(ether_type);
+ } else {
+ ether_type = 0;
+ }
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- NULL, error);
+ last_item,
+ ether_type, NULL,
+ error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr,
+ attr, item_flags,
error);
if (ret < 0)
return ret;
"action not supported");
}
}
+ /*
+ * Validate the drop action's mutual exclusion with other actions:
+ * drop is mutually exclusive with every other action except count.
+ */
+ if ((action_flags & MLX5_FLOW_ACTION_DROP) &&
+ (action_flags & ~(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_COUNT)))
+ return rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION, NULL,
+ "Drop action is mutually-exclusive "
+ "with any other action, except for "
+ "Count action");
if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
- struct mlx5_flow *flow;
+ size_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
+ struct mlx5_flow *dev_flow;
size += flow_verbs_get_actions_size(actions);
size += flow_verbs_get_items_size(items);
- flow = rte_calloc(__func__, 1, size, 0);
- if (!flow) {
+ dev_flow = rte_calloc(__func__, 1, size, 0);
+ if (!dev_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"not enough memory to create flow");
return NULL;
}
- flow->verbs.attr = (void *)(flow + 1);
- flow->verbs.specs =
- (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
- return flow;
+ dev_flow->verbs.attr = (void *)(dev_flow + 1);
+ dev_flow->verbs.specs = (void *)(dev_flow->verbs.attr + 1);
+ dev_flow->ingress = attr->ingress;
+ dev_flow->transfer = attr->transfer;
+ return dev_flow;
}
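/*
 * Illustrative sketch of the single-allocation layout built by
 * flow_verbs_prepare(): the flow handle, the Verbs attribute, and the
 * variable-size spec buffer live back to back in one calloc'd block.
 * The stub types below are assumptions for illustration only:
 *
 *	[struct mlx5_flow][struct ibv_flow_attr][specs ...]
 */
#include <stdlib.h>

struct attr_stub { int dummy; };
struct flow_stub { struct attr_stub *attr; void *specs; };

static struct flow_stub *
flow_prepare(size_t specs_size)
{
	struct flow_stub *f =
		calloc(1, sizeof(*f) + sizeof(struct attr_stub) + specs_size);

	if (!f)
		return NULL;
	f->attr = (struct attr_stub *)(f + 1);	/* Attr follows the handle. */
	f->specs = (void *)(f->attr + 1);	/* Specs follow the attr. */
	return f;
}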
/**
* Pointer to the error structure.
*
* @return
- * 0 on success, else a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, else a negative errno value otherwise and rte_errno is set.
*/
static int
flow_verbs_translate(struct rte_eth_dev *dev,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow *flow = dev_flow->flow;
uint64_t item_flags = 0;
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
"action not supported");
}
}
- flow->actions = action_flags;
+ dev_flow->actions = action_flags;
for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
flow_verbs_translate_item_ipv4(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV4_LAYER_TYPES,
flow_verbs_translate_item_ipv6(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L3;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel,
MLX5_IPV6_LAYER_TYPES,
flow_verbs_translate_item_tcp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_TCP,
(IBV_RX_HASH_SRC_PORT_TCP |
flow_verbs_translate_item_udp(dev_flow, items,
item_flags);
subpriority = MLX5_PRIORITY_MAP_L4;
- dev_flow->verbs.hash_fields |=
+ dev_flow->hash_fields |=
mlx5_flow_hashfields_adjust
(dev_flow, tunnel, ETH_RSS_UDP,
(IBV_RX_HASH_SRC_PORT_UDP |
dev_flow->layers = item_flags;
dev_flow->verbs.attr->priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
+ dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
return 0;
}
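/*
 * Illustrative sketch, a simplified analogue of the
 * mlx5_flow_hashfields_adjust() calls above (not the driver's actual
 * logic): each matched layer contributes its Verbs RX-hash bits, so an
 * IPv4 + UDP pattern hashes on the L3 addresses plus the UDP ports.
 */
#include <stdint.h>
#include <infiniband/verbs.h>

static uint64_t
hashfields_for(int want_ipv4, int want_udp)
{
	uint64_t fields = 0;

	if (want_ipv4)
		fields |= IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
	if (want_udp)
		fields |= IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
	return fields;
}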
verbs->flow = NULL;
}
if (verbs->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, verbs->hrxq);
verbs->hrxq = NULL;
}
+ if (dev_flow->verbs.vf_vlan.tag &&
+ dev_flow->verbs.vf_vlan.created) {
+ mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ }
}
}
rte_free(dev_flow);
}
if (flow->counter) {
- flow_verbs_counter_release(flow->counter);
+ flow_verbs_counter_release(dev, flow->counter);
flow->counter = NULL;
}
}
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_verbs *verbs;
struct mlx5_flow *dev_flow;
int err;
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
verbs = &dev_flow->verbs;
- if (flow->actions & MLX5_FLOW_ACTION_DROP) {
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP) {
verbs->hrxq = mlx5_hrxq_drop_new(dev);
if (!verbs->hrxq) {
rte_flow_error_set
} else {
struct mlx5_hrxq *hrxq;
- hrxq = mlx5_hrxq_get(dev, flow->key,
+ assert(flow->rss.queue);
+ hrxq = mlx5_hrxq_get(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num);
if (!hrxq)
- hrxq = mlx5_hrxq_new(dev, flow->key,
+ hrxq = mlx5_hrxq_new(dev, flow->rss.key,
MLX5_RSS_HASH_KEY_LEN,
- verbs->hash_fields,
- (*flow->queue),
+ dev_flow->hash_fields,
+ (*flow->rss.queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"hardware refuses to create flow");
goto error;
}
+ if (priv->vmwa_context &&
+ dev_flow->verbs.vf_vlan.tag &&
+ !dev_flow->verbs.vf_vlan.created) {
+ /*
+ * The rule contains the VLAN pattern.
+ * For VF we are going to create VLAN
+ * interface to make hypervisor set correct
+ * e-Switch vport context.
+ */
+ mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+ }
}
return 0;
error:
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
verbs = &dev_flow->verbs;
if (verbs->hrxq) {
- if (flow->actions & MLX5_FLOW_ACTION_DROP)
+ if (dev_flow->actions & MLX5_FLOW_ACTION_DROP)
mlx5_hrxq_drop_release(dev);
else
mlx5_hrxq_release(dev, verbs->hrxq);
verbs->hrxq = NULL;
}
+ if (dev_flow->verbs.vf_vlan.tag &&
+ dev_flow->verbs.vf_vlan.created) {
+ mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ }
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;
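/*
 * Illustrative sketch of the error-path convention used above: cleanup
 * calls may clobber rte_errno, so the original code is saved into err
 * before unwinding and restored afterwards. The apply/unwind callbacks
 * are assumed stand-ins, not driver functions.
 */
#include <rte_errno.h>

static int
apply_or_unwind(int (*apply)(void), void (*unwind)(void))
{
	int err;

	if (apply() == 0)
		return 0;
	err = rte_errno;	/* Save before cleanup can clobber it. */
	unwind();
	rte_errno = err;	/* Restore rte_errno. */
	return -rte_errno;
}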