-/*-
- * BSD LICENSE
- *
- * Copyright 2016 6WIND S.A.
- * Copyright 2016 Mellanox.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of 6WIND S.A. nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright 2016 6WIND S.A.
+ * Copyright 2016 Mellanox.
*/
#include <sys/queue.h>
#pragma GCC diagnostic error "-Wpedantic"
#endif
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
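+/* For the IPv6 vtc_flow masks and shifts used below. */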
+#include <rte_ip.h>
#include "mlx5.h"
+#include "mlx5_defs.h"
#include "mlx5_prm.h"
+#include "mlx5_glue.h"
/* Define minimal priority for control plane flows. */
#define MLX5_CTRL_FLOW_PRIORITY 4
#define MLX5_IPV6 6
#ifndef HAVE_IBV_DEVICE_COUNTERS_SET_SUPPORT
-struct ibv_counter_set_init_attr {
- int dummy;
-};
struct ibv_flow_spec_counter_action {
int dummy;
};
-struct ibv_counter_set {
- int dummy;
-};
-
-static inline int
-ibv_destroy_counter_set(struct ibv_counter_set *cs)
-{
- (void)cs;
- return -ENOTSUP;
-}
#endif
/* Dev ops structure defined in mlx5.c */
.dst_addr = -1,
.type_of_service = -1,
.next_proto_id = -1,
+ .time_to_live = -1,
},
},
.default_mask = &rte_flow_item_ipv4_mask,
/** Structure to pass to the conversion function. */
struct mlx5_flow_parse {
uint32_t inner; /**< Set once VXLAN is encountered. */
+ uint32_t allmulti:1; /**< Set once allmulti dst MAC is encountered. */
uint32_t create:1;
/**< Whether resources should remain after a validate. */
uint32_t drop:1; /**< Target is a drop queue. */
}
if (item->mask) {
unsigned int i;
- const uint8_t *spec = item->mask;
+ const uint8_t *spec = item->spec;
for (i = 0; i < size; ++i)
if ((spec[i] | mask[i]) != mask[i])
struct mlx5_flow_parse *parser,
const struct rte_eth_rss_conf *rss_conf)
{
- const struct rte_eth_rss_conf *rss =
- rss_conf ? rss_conf : &priv->rss_conf;
+ const struct rte_eth_rss_conf *rss;
+ if (rss_conf) {
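+ /* Reject hash fields outside the supported RSS set. */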
+ if (rss_conf->rss_hf & MLX5_RSS_HF_MASK)
+ return EINVAL;
+ rss = rss_conf;
+ } else {
+ rss = &priv->rss_conf;
+ }
if (rss->rss_key_len > 40)
return EINVAL;
parser->rss_conf.rss_key_len = rss->rss_key_len;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_FLAG) {
parser->mark = 1;
} else if (actions->type == RTE_FLOW_ACTION_TYPE_COUNT &&
- priv->counter_set_supported) {
+ priv->config.flow_counter_en) {
parser->count = 1;
} else {
goto exit_action_not_supported;
parser->queue[n].offset += cur_item->dst_sz;
}
}
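+ /* Account for the drop action specification when sizing the attribute. */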
+ if (parser->drop) {
+ parser->queue[HASH_RXQ_ETH].offset +=
+ sizeof(struct ibv_flow_spec_action_drop);
+ }
if (parser->mark) {
for (i = 0; i != hash_rxq_init_n; ++i)
parser->queue[i].offset +=
attr->priority +
hash_rxq_init[parser->layer].flow_priority;
}
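+ /* Switch pure L2 multicast rules to the multicast default flow type. */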
+ if (parser->allmulti &&
+ parser->layer == HASH_RXQ_ETH) {
+ for (i = 0; i != hash_rxq_init_n; ++i) {
+ if (!parser->queue[i].ibv_attr)
+ continue;
+ if (parser->queue[i].ibv_attr->num_of_specs != 1)
+ break;
+ parser->queue[i].ibv_attr->type =
+ IBV_FLOW_ATTR_MC_DEFAULT;
+ }
+ }
exit_free:
/* Only verification is expected; all resources should be released. */
if (!parser->create) {
eth.val.ether_type &= eth.mask.ether_type;
}
mlx5_flow_create_copy(parser, &eth, eth_size);
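+ /* The multicast bit is the least significant bit of the first destination MAC byte. */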
+ parser->allmulti = eth.val.dst_mac[0] & 1;
return 0;
}
parser->layer = HASH_RXQ_IPV6;
if (spec) {
unsigned int i;
+ uint32_t vtc_flow_val;
+ uint32_t vtc_flow_mask;
if (!mask)
mask = default_mask;
RTE_DIM(ipv6.mask.src_ip));
memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
RTE_DIM(ipv6.mask.dst_ip));
- ipv6.mask.flow_label = mask->hdr.vtc_flow;
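+ /*
+  * IPv6 vtc_flow layout (host order after rte_be_to_cpu_32()):
+  *   bits 28-31: version
+  *   bits 20-27: traffic class
+  *   bits  0-19: flow label
+  */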
+ vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
+ vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
+ ipv6.val.flow_label =
+ rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
+ ipv6.val.next_hdr = spec->hdr.proto;
+ ipv6.val.hop_limit = spec->hdr.hop_limits;
+ ipv6.mask.flow_label =
+ rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
+ IPV6_HDR_FL_SHIFT);
+ ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
+ IPV6_HDR_TC_SHIFT;
ipv6.mask.next_hdr = mask->hdr.proto;
ipv6.mask.hop_limit = mask->hdr.hop_limits;
/* Remove unwanted bits from values. */
ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
}
ipv6.val.flow_label &= ipv6.mask.flow_label;
+ ipv6.val.traffic_class &= ipv6.mask.traffic_class;
ipv6.val.next_hdr &= ipv6.mask.next_hdr;
ipv6.val.hop_limit &= ipv6.mask.hop_limit;
}
};
init_attr.counter_set_id = 0;
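+ /* The counter set handle is referenced by the counter flow specification below. */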
- parser->cs = ibv_create_counter_set(priv->ctx, &init_attr);
+ parser->cs = mlx5_glue->create_counter_set(priv->ctx, &init_attr);
if (!parser->cs)
return EINVAL;
counter.counter_set_handle = parser->cs->handle;
return 0;
parser->queue[HASH_RXQ_ETH].ibv_attr = NULL;
flow->frxq[HASH_RXQ_ETH].ibv_flow =
- ibv_create_flow(priv->flow_drop_queue->qp,
- flow->frxq[HASH_RXQ_ETH].ibv_attr);
+ mlx5_glue->create_flow(priv->flow_drop_queue->qp,
+ flow->frxq[HASH_RXQ_ETH].ibv_attr);
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "flow rule creation failure");
error:
assert(flow);
if (flow->frxq[HASH_RXQ_ETH].ibv_flow) {
- claim_zero(ibv_destroy_flow(flow->frxq[HASH_RXQ_ETH].ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[HASH_RXQ_ETH].ibv_flow));
flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
}
if (flow->frxq[HASH_RXQ_ETH].ibv_attr) {
flow->frxq[HASH_RXQ_ETH].ibv_attr = NULL;
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
if (!flow->frxq[i].hrxq)
continue;
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_HANDLE,
if (flow->frxq[i].ibv_flow) {
struct ibv_flow *ibv_flow = flow->frxq[i].ibv_flow;
- claim_zero(ibv_destroy_flow(ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow(ibv_flow));
}
if (flow->frxq[i].hrxq)
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
rte_free(flow->frxq[i].ibv_attr);
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
parser->cs = NULL;
}
free:
if (flow->drop) {
if (flow->frxq[HASH_RXQ_ETH].ibv_flow)
- claim_zero(ibv_destroy_flow
+ claim_zero(mlx5_glue->destroy_flow
(flow->frxq[HASH_RXQ_ETH].ibv_flow));
rte_free(flow->frxq[HASH_RXQ_ETH].ibv_attr);
} else {
struct mlx5_flow *frxq = &flow->frxq[i];
if (frxq->ibv_flow)
- claim_zero(ibv_destroy_flow(frxq->ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (frxq->ibv_flow));
if (frxq->hrxq)
mlx5_priv_hrxq_release(priv, frxq->hrxq);
if (frxq->ibv_attr)
}
}
if (flow->cs) {
- claim_zero(ibv_destroy_counter_set(flow->cs));
+ claim_zero(mlx5_glue->destroy_counter_set(flow->cs));
flow->cs = NULL;
}
TAILQ_REMOVE(list, flow, next);
WARN("cannot allocate memory for drop queue");
goto error;
}
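+ /* The drop queue is built from a dedicated CQ, WQ, indirection table and QP. */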
- fdq->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
+ fdq->cq = mlx5_glue->create_cq(priv->ctx, 1, NULL, NULL, 0);
if (!fdq->cq) {
WARN("cannot allocate CQ for drop queue");
goto error;
}
- fdq->wq = ibv_create_wq(priv->ctx,
- &(struct ibv_wq_init_attr){
+ fdq->wq = mlx5_glue->create_wq
+ (priv->ctx,
+ &(struct ibv_wq_init_attr){
.wq_type = IBV_WQT_RQ,
.max_wr = 1,
.max_sge = 1,
.pd = priv->pd,
.cq = fdq->cq,
- });
+ });
if (!fdq->wq) {
WARN("cannot allocate WQ for drop queue");
goto error;
}
- fdq->ind_table = ibv_create_rwq_ind_table(priv->ctx,
- &(struct ibv_rwq_ind_table_init_attr){
+ fdq->ind_table = mlx5_glue->create_rwq_ind_table
+ (priv->ctx,
+ &(struct ibv_rwq_ind_table_init_attr){
.log_ind_tbl_size = 0,
.ind_tbl = &fdq->wq,
.comp_mask = 0,
- });
+ });
if (!fdq->ind_table) {
WARN("cannot allocate indirection table for drop queue");
goto error;
}
- fdq->qp = ibv_create_qp_ex(priv->ctx,
- &(struct ibv_qp_init_attr_ex){
+ fdq->qp = mlx5_glue->create_qp_ex
+ (priv->ctx,
+ &(struct ibv_qp_init_attr_ex){
.qp_type = IBV_QPT_RAW_PACKET,
.comp_mask =
IBV_QP_INIT_ATTR_PD |
},
.rwq_ind_tbl = fdq->ind_table,
.pd = priv->pd
- });
+ });
if (!fdq->qp) {
WARN("cannot allocate QP for drop queue");
goto error;
return 0;
error:
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
if (fdq)
rte_free(fdq);
priv->flow_drop_queue = NULL;
if (!fdq)
return;
if (fdq->qp)
- claim_zero(ibv_destroy_qp(fdq->qp));
+ claim_zero(mlx5_glue->destroy_qp(fdq->qp));
if (fdq->ind_table)
- claim_zero(ibv_destroy_rwq_ind_table(fdq->ind_table));
+ claim_zero(mlx5_glue->destroy_rwq_ind_table(fdq->ind_table));
if (fdq->wq)
- claim_zero(ibv_destroy_wq(fdq->wq));
+ claim_zero(mlx5_glue->destroy_wq(fdq->wq));
if (fdq->cq)
- claim_zero(ibv_destroy_cq(fdq->cq));
+ claim_zero(mlx5_glue->destroy_cq(fdq->cq));
rte_free(fdq);
priv->flow_drop_queue = NULL;
}
if (flow->drop) {
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow)
continue;
- claim_zero(ibv_destroy_flow
+ claim_zero(mlx5_glue->destroy_flow
(flow->frxq[HASH_RXQ_ETH].ibv_flow));
flow->frxq[HASH_RXQ_ETH].ibv_flow = NULL;
/* Next flow. */
for (i = 0; i != hash_rxq_init_n; ++i) {
if (!flow->frxq[i].ibv_flow)
continue;
- claim_zero(ibv_destroy_flow(flow->frxq[i].ibv_flow));
+ claim_zero(mlx5_glue->destroy_flow
+ (flow->frxq[i].ibv_flow));
flow->frxq[i].ibv_flow = NULL;
mlx5_priv_hrxq_release(priv, flow->frxq[i].hrxq);
flow->frxq[i].hrxq = NULL;
if (flow->drop) {
flow->frxq[HASH_RXQ_ETH].ibv_flow =
- ibv_create_flow
+ mlx5_glue->create_flow
(priv->flow_drop_queue->qp,
flow->frxq[HASH_RXQ_ETH].ibv_attr);
if (!flow->frxq[HASH_RXQ_ETH].ibv_flow) {
}
flow_create:
flow->frxq[i].ibv_flow =
- ibv_create_flow(flow->frxq[i].hrxq->qp,
- flow->frxq[i].ibv_attr);
+ mlx5_glue->create_flow(flow->frxq[i].hrxq->qp,
+ flow->frxq[i].ibv_attr);
if (!flow->frxq[i].ibv_flow) {
DEBUG("Flow %p cannot be applied",
(void *)flow);
.out = counters,
.outlen = 2 * sizeof(uint64_t),
};
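+ /* Read back the two 64-bit counter values (packets and bytes). */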
- int res = ibv_query_counter_set(&query_cs_attr, &query_out);
+ int res = mlx5_glue->query_counter_set(&query_cs_attr, &query_out);
if (res) {
rte_flow_error_set(error, -res,
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV4,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_UDP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
attributes->items[2] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_TCP,
.spec = &attributes->l4,
+ .mask = &attributes->l4,
};
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
attributes->items[1] = (struct rte_flow_item){
.type = RTE_FLOW_ITEM_TYPE_IPV6,
.spec = &attributes->l3,
+ .mask = &attributes->l3,
};
break;
default: