#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "mlx5.h"
#include "mlx5_defs.h"
-#include "mlx5_prm.h"
-#include "mlx5_glue.h"
#include "mlx5_flow.h"
+#include "mlx5_glue.h"
+#include "mlx5_prm.h"
+#include "mlx5_rxtx.h"
#define VERBS_SPEC_INNER(item_flags) \
(!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
struct mlx5_flow_counter *counter)
{
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
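+ /* Counter support via the older counter-set Verbs API ("V42"). */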
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_counter_set_init_attr init = {
.counter_set_id = counter->id};
- counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
+ counter->cs = mlx5_glue->create_counter_set(ctx, &init);
if (!counter->cs) {
rte_errno = ENOTSUP;
return -ENOTSUP;
}
return 0;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
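+ /* Counter support via the newer counters Verbs API ("V45"). */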
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct ibv_context *ctx = priv->sh->ctx;
struct ibv_counters_init_attr init = {0};
struct ibv_counter_attach_attr attach;
int ret;
memset(&attach, 0, sizeof(attach));
- counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
+ counter->cs = mlx5_glue->create_counters(ctx, &init);
if (!counter->cs) {
rte_errno = ENOTSUP;
return -ENOTSUP;
static struct mlx5_flow_counter *
flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_counter *cnt;
int ret;
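+ /* Shared counters are reference-counted; look the ID up before creating. */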
if (shared) {
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
if (cnt->shared && cnt->id == id) {
cnt->ref_cnt++;
return cnt;
/* Create counter with Verbs. */
ret = flow_verbs_counter_create(dev, cnt);
if (!ret) {
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
return cnt;
}
/* Some error occurred in the Verbs library. */
/**
* Release a flow counter.
*
+ * @param[in] dev
+ * Pointer to the Ethernet device structure.
* @param[in] counter
 *   Pointer to the counter handle.
*/
static void
-flow_verbs_counter_release(struct mlx5_flow_counter *counter)
+flow_verbs_counter_release(struct rte_eth_dev *dev,
+ struct mlx5_flow_counter *counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
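+ /* Destroy the Verbs counter object only when the last reference is gone. */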
if (--counter->ref_cnt == 0) {
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
claim_zero(mlx5_glue->destroy_counters(counter->cs));
#endif
- LIST_REMOVE(counter, next);
+ TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
rte_free(counter);
}
}
if (spec) {
unsigned int i;
- memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.val.dst_mac, spec->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&eth.val.src_mac, spec->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
- memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(&eth.mask.src_mac, mask->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.mask.ether_type = mask->type;
/* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
eth.val.src_mac[i] &= eth.mask.src_mac[i];
}
flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
else
flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
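+ /* Save the VLAN ID (VID bits of the TCI); VF rules may need it below. */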
+ if (!tunnel)
+ dev_flow->verbs.vf_vlan.tag =
+ rte_be_to_cpu_16(spec->tci) & 0x0fff;
}
/**
vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
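+ /* The IPv6 vtc_flow word packs version, traffic class and flow label. */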
ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.val.next_hdr = spec->hdr.proto;
ipv6.val.hop_limit = spec->hdr.hop_limits;
ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.mask.next_hdr = mask->hdr.proto;
ipv6.mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
* Pointer to the list of items.
* @param[in] actions
* Pointer to the list of actions.
+ * @param[in] external
+ * This flow rule is created by a request external to the PMD.
* @param[out] error
* Pointer to the error structure.
*
const struct rte_flow_attr *attr,
const struct rte_flow_item items[],
const struct rte_flow_action actions[],
+ bool external __rte_unused,
struct rte_flow_error *error)
{
int ret;
break;
case RTE_FLOW_ITEM_TYPE_VLAN:
ret = mlx5_flow_validate_item_vlan(items, item_flags,
- error);
+ dev, error);
if (ret < 0)
return ret;
last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
ret = mlx5_flow_validate_item_ipv4(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
ret = mlx5_flow_validate_item_ipv6(items, item_flags,
- error);
+ NULL, error);
if (ret < 0)
return ret;
last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
case RTE_FLOW_ACTION_TYPE_RSS:
ret = mlx5_flow_validate_action_rss(actions,
action_flags, dev,
- attr,
+ attr, item_flags,
error);
if (ret < 0)
return ret;
* Pointer to the error structure.
*
* @return
- * 0 on success, else a negative errno value otherwise and rte_ernno is set.
+ * 0 on success, a negative errno value otherwise and rte_errno is set.
*/
static int
flow_verbs_translate(struct rte_eth_dev *dev,
uint64_t action_flags = 0;
uint64_t priority = attr->priority;
uint32_t subpriority = 0;
- struct priv *priv = dev->data->dev_private;
+ struct mlx5_priv *priv = dev->data->dev_private;
if (priority == MLX5_FLOW_PRIO_RSVD)
priority = priv->config.flow_prio - 1;
dev_flow->layers = item_flags;
dev_flow->verbs.attr->priority =
mlx5_flow_adjust_priority(dev, priority, subpriority);
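+ /* With an IB context shared among ports, name the target IB port. */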
+ dev_flow->verbs.attr->port = (uint8_t)priv->ibv_port;
return 0;
}
mlx5_hrxq_release(dev, verbs->hrxq);
verbs->hrxq = NULL;
}
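+ /* Release the VF VLAN interface created for this rule, if any. */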
+ if (dev_flow->verbs.vf_vlan.tag &&
+ dev_flow->verbs.vf_vlan.created) {
+ mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ }
}
}
rte_free(dev_flow);
}
if (flow->counter) {
- flow_verbs_counter_release(flow->counter);
+ flow_verbs_counter_release(dev, flow->counter);
flow->counter = NULL;
}
}
flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_verbs *verbs;
struct mlx5_flow *dev_flow;
int err;
(*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_FLOW_LAYER_TUNNEL));
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,
"hardware refuses to create flow");
goto error;
}
+ if (priv->vmwa_context &&
+ dev_flow->verbs.vf_vlan.tag &&
+ !dev_flow->verbs.vf_vlan.created) {
+ /*
+ * The rule contains a VLAN pattern.
+ * For a VF, create a VLAN interface so the
+ * hypervisor sets the correct e-Switch
+ * vport context.
+ */
+ mlx5_vlan_vmwa_acquire(dev, &dev_flow->verbs.vf_vlan);
+ }
}
return 0;
error:
mlx5_hrxq_release(dev, verbs->hrxq);
verbs->hrxq = NULL;
}
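+ /* Error path: likewise drop any VF VLAN interface acquired above. */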
+ if (dev_flow->verbs.vf_vlan.tag &&
+ dev_flow->verbs.vf_vlan.created) {
+ mlx5_vlan_vmwa_release(dev, &dev_flow->verbs.vf_vlan);
+ }
}
rte_errno = err; /* Restore rte_errno. */
return -rte_errno;