#include <rte_common.h>
#include <rte_ether.h>
-#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
int ret;
if (shared) {
- LIST_FOREACH(cnt, &priv->flow_counters, next) {
+ TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
if (cnt->shared && cnt->id == id) {
cnt->ref_cnt++;
return cnt;
/* Create counter with Verbs. */
ret = flow_verbs_counter_create(dev, cnt);
if (!ret) {
- LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
+ TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
return cnt;
}
/* Some error occurred in Verbs library. */
/**
 * Release a flow counter.
 *
+ * Drops one reference to the counter; when the reference count reaches
+ * zero the underlying Verbs counter object is destroyed, the counter is
+ * unlinked from the shared-context counter list and its memory is freed.
+ *
+ * @param[in] dev
+ *   Pointer to the Ethernet device structure.
 * @param[in] counter
 *   Pointer to the counter handler.
 */
static void
-flow_verbs_counter_release(struct mlx5_flow_counter *counter)
+flow_verbs_counter_release(struct rte_eth_dev *dev,
+	struct mlx5_flow_counter *counter)
{
+ struct mlx5_priv *priv = dev->data->dev_private;
+
if (--counter->ref_cnt == 0) {
+ /* Last reference: destroy the Verbs object, unlink, then free. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
claim_zero(mlx5_glue->destroy_counters(counter->cs));
#endif
+ /* Counter list now lives in the shared IB context (priv->sh). */
- LIST_REMOVE(counter, next);
+ TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
rte_free(counter);
}
}
if (spec) {
unsigned int i;
- memcpy(ð.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.val.dst_mac, spec->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(ð.val.src_mac, spec->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.val.ether_type = spec->type;
- memcpy(ð.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
- memcpy(ð.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
+ memcpy(ð.mask.dst_mac, mask->dst.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
+ memcpy(ð.mask.src_mac, mask->src.addr_bytes,
+ RTE_ETHER_ADDR_LEN);
eth.mask.ether_type = mask->type;
/* Remove unwanted bits from values. */
- for (i = 0; i < ETHER_ADDR_LEN; ++i) {
+ for (i = 0; i < RTE_ETHER_ADDR_LEN; ++i) {
eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
eth.val.src_mac[i] &= eth.mask.src_mac[i];
}
vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
ipv6.val.flow_label =
- rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_val & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.val.next_hdr = spec->hdr.proto;
ipv6.val.hop_limit = spec->hdr.hop_limits;
ipv6.mask.flow_label =
- rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
- IPV6_HDR_FL_SHIFT);
- ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
- IPV6_HDR_TC_SHIFT;
+ rte_cpu_to_be_32((vtc_flow_mask & RTE_IPV6_HDR_FL_MASK) >>
+ RTE_IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & RTE_IPV6_HDR_TC_MASK) >>
+ RTE_IPV6_HDR_TC_SHIFT;
ipv6.mask.next_hdr = mask->hdr.proto;
ipv6.mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
rte_free(dev_flow);
}
if (flow->counter) {
- flow_verbs_counter_release(flow->counter);
+ flow_verbs_counter_release(dev, flow->counter);
flow->counter = NULL;
}
}
(*flow->queue),
flow->rss.queue_num,
!!(dev_flow->layers &
- MLX5_FLOW_LAYER_TUNNEL));
+ MLX5_FLOW_LAYER_TUNNEL),
+ 0);
if (!hrxq) {
rte_flow_error_set
(error, rte_errno,