Add calls to the Direct Rules API inside the glue functions.
Due to differences in parameters between the Direct Rules and Direct
Verbs APIs, some of the glue function signatures were updated.
Signed-off-by: Ori Kam <orika@mellanox.com>
Acked-by: Shahaf Shuler <shahafs@mellanox.com>
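
The gist of the signature change, as a minimal sketch for reviewers
(the helper name is hypothetical; the fields and the glue signature are
the ones updated by the hunks below): callers now pass the Direct Rules
name space and an action flags word, and the reformat data buffer moves
to the tail of the argument list.

static void *
example_reformat_action(struct mlx5_priv *priv, struct rte_flow *flow,
			struct mlx5_flow_dv_encap_decap_resource *res)
{
	/* Group 0 maps to the root table and takes flags == 1. */
	uint64_t flags = flow->group ? 0 : 1;
	/* The flow direction selects the RX or TX name space. */
	struct mlx5dv_dr_ns *ns = flow->ingress ? priv->rx_ns : priv->tx_ns;

	/* Type arguments now come first, the data buffer last. */
	return mlx5_glue->dv_create_flow_action_packet_reformat
		(priv->sh->ctx, res->reformat_type, res->ft_type, ns, flags,
		 res->size, res->size ? res->buf : NULL);
}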
infiniband/mlx5dv.h \
func mlx5dv_create_flow_action_packet_reformat \
$(AUTOCONF_OUTPUT)
+ $Q sh -- '$<' '$@' \
+ HAVE_MLX5DV_DR \
+ infiniband/mlx5dv.h \
+ enum MLX5DV_DR_NS_TYPE_TERMINATING \
+ $(AUTOCONF_OUTPUT)
$Q sh -- '$<' '$@' \
HAVE_IBV_DEVX_OBJ \
infiniband/mlx5dv.h \
priv->tcf_context = NULL;
}
}
+#ifdef HAVE_MLX5DV_DR
+ priv->rx_ns = mlx5dv_dr_create_ns
+ (sh->ctx, MLX5DV_DR_NS_DOMAIN_INGRESS_BYPASS);
+ if (priv->rx_ns == NULL) {
+ DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+ err = errno;
+ goto error;
+ }
+ priv->tx_ns = mlx5dv_dr_create_ns(sh->ctx,
+ MLX5DV_DR_NS_DOMAIN_EGRESS_BYPASS);
+ if (priv->tx_ns == NULL) {
+ DRV_LOG(ERR, "mlx5dv_dr_create_ns failed");
+ err = errno;
+ goto error;
+ }
+#endif
TAILQ_INIT(&priv->flows);
TAILQ_INIT(&priv->ctrl_flows);
/* Hint libmlx5 to use PMD allocator for data plane resources */
struct mlx5_ibv_shared_port port[]; /* per device port data array. */
};
+/* Table structure. */
+struct mlx5_flow_tbl_resource {
+ void *obj; /**< Pointer to DR table object. */
+ rte_atomic32_t refcnt; /**< Reference counter. */
+};
+
+#define MLX5_MAX_TABLES 1024
+#define MLX5_GROUP_FACTOR 1
+
struct mlx5_priv {
LIST_ENTRY(mlx5_priv) mem_event_cb;
/**< Called by memory event callback. */
/* UAR same-page access control required in 32bit implementations. */
#endif
struct mlx5_flow_tcf_context *tcf_context; /* TC flower context. */
+ void *rx_ns; /* RX Direct Rules name space handle. */
+ struct mlx5_flow_tbl_resource rx_tbl[MLX5_MAX_TABLES];
+ /* RX Direct Rules tables. */
+ void *tx_ns; /* TX Direct Rules name space handle. */
+ struct mlx5_flow_tbl_resource tx_tbl[MLX5_MAX_TABLES];
+ /* TX Direct Rules tables. */
};
#define PORT_ID(priv) ((priv)->dev_data->port_id)
flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
flow = rte_calloc(__func__, 1, flow_size, 0);
flow->drv_type = flow_get_drv_type(dev, attr);
+ flow->ingress = attr->ingress;
assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
flow->drv_type < MLX5_FLOW_TYPE_MAX);
flow->queue = (void *)(flow + 1);
uint16_t crc; /**< CRC of key. */
uint16_t priority; /**< Priority of matcher. */
uint8_t egress; /**< Egress matcher. */
+ uint32_t group; /**< The matcher group. */
struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};
size_t size;
uint8_t reformat_type;
uint8_t ft_type;
+ uint64_t flags; /**< Flags for RDMA API. */
};
/* Tag resource structure. */
/* Flow structure. */
struct rte_flow {
TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
- enum mlx5_flow_drv_type drv_type; /**< Drvier type. */
+ enum mlx5_flow_drv_type drv_type; /**< Driver type. */
struct mlx5_flow_counter *counter; /**< Holds flow counter. */
struct mlx5_flow_dv_tag_resource *tag_resource;
/**< pointer to the tag action. */
uint64_t actions;
/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
+ uint8_t ingress; /**< 1 if the flow is ingress. */
+ uint32_t group; /**< The group index. */
};
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
{
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_encap_decap_resource *cache_resource;
+ struct rte_flow *flow = dev_flow->flow;
+ struct mlx5dv_dr_ns *ns;
+
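+ /* Group 0 uses the root table, which requires the action flags bit. */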
+ resource->flags = flow->group ? 0 : 1;
+ if (flow->ingress)
+ ns = priv->rx_ns;
+ else
+ ns = priv->tx_ns;
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
if (resource->reformat_type == cache_resource->reformat_type &&
resource->ft_type == cache_resource->ft_type &&
+ resource->flags == cache_resource->flags &&
resource->size == cache_resource->size &&
!memcmp((const void *)resource->buf,
(const void *)cache_resource->buf,
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_packet_reformat
- (priv->sh->ctx, cache_resource->size,
- (cache_resource->size ? cache_resource->buf : NULL),
- cache_resource->reformat_type,
- cache_resource->ft_type);
+ (priv->sh->ctx, cache_resource->reformat_type,
+ cache_resource->ft_type, ns, cache_resource->flags,
+ cache_resource->size,
+ (cache_resource->size ? cache_resource->buf : NULL));
if (!cache_resource->verbs_action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
struct mlx5_priv *priv = dev->data->dev_private;
struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
+ struct mlx5dv_dr_ns *ns =
+ resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
+ priv->tx_ns : priv->rx_ns;
+
/* Lookup a matching resource from cache. */
LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
if (resource->ft_type == cache_resource->ft_type &&
*cache_resource = *resource;
cache_resource->verbs_action =
mlx5_glue->dv_create_flow_action_modify_header
- (priv->sh->ctx,
+ (priv->sh->ctx, cache_resource->ft_type,
+ ns, 0,
cache_resource->actions_num *
sizeof(cache_resource->actions[0]),
- (uint64_t *)cache_resource->actions,
- cache_resource->ft_type);
+ (uint64_t *)cache_resource->actions);
if (!cache_resource->verbs_action) {
rte_free(cache_resource);
return rte_flow_error_set(error, ENOMEM,
struct mlx5_priv *priv = dev->data->dev_private;
uint32_t priority_max = priv->config.flow_prio - 1;
+#ifdef HAVE_MLX5DV_DR
if (attributes->group)
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
NULL,
"groups is not supported");
+#endif
if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
attributes->priority >= priority_max)
return rte_flow_error_set(error, ENOTSUP,
* Flow pattern to translate.
* @param[in] inner
* Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule into.
*/
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
if (!ipv4_v)
return;
* Flow pattern to translate.
* @param[in] inner
* Item is inner pattern.
+ * @param[in] group
+ * The group to insert the rule into.
*/
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
const struct rte_flow_item *item,
- int inner)
+ int inner, uint32_t group)
{
const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
outer_headers);
headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
}
- MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ if (group == 0)
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
+ else
+ MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
if (!ipv6_v)
return;
match_criteria_enable |=
(!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
-
+#ifdef HAVE_MLX5DV_DR
+ match_criteria_enable |=
+ (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
+#endif
return match_criteria_enable;
}
.type = IBV_FLOW_ATTR_NORMAL,
.match_mask = (void *)&matcher->mask,
};
+ struct mlx5_flow_tbl_resource *tbl = NULL;
/* Lookup from cache. */
LIST_FOREACH(cache_matcher, &priv->matchers, next) {
if (matcher->crc == cache_matcher->crc &&
matcher->priority == cache_matcher->priority &&
matcher->egress == cache_matcher->egress &&
+ matcher->group == cache_matcher->group &&
!memcmp((const void *)matcher->mask.buf,
(const void *)cache_matcher->mask.buf,
cache_matcher->mask.size)) {
return 0;
}
}
+#ifdef HAVE_MLX5DV_DR
+ if (matcher->egress) {
+ tbl = &priv->tx_tbl[matcher->group];
+ if (!tbl->obj)
+ tbl->obj = mlx5_glue->dr_create_flow_tbl
+ (priv->tx_ns,
+ matcher->group * MLX5_GROUP_FACTOR);
+ } else {
+ tbl = &priv->rx_tbl[matcher->group];
+ if (!tbl->obj)
+ tbl->obj = mlx5_glue->dr_create_flow_tbl
+ (priv->rx_ns,
+ matcher->group * MLX5_GROUP_FACTOR);
+ }
+ if (!tbl->obj)
+ return rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "cannot create table");
+
+ rte_atomic32_inc(&tbl->refcnt);
+#endif
/* Register new matcher. */
cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
if (!cache_matcher)
if (matcher->egress)
dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
cache_matcher->matcher_object =
- mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr);
+ mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
+ tbl->obj);
if (!cache_matcher->matcher_object) {
rte_free(cache_matcher);
+#ifdef HAVE_MLX5DV_DR
+ if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ tbl->obj = NULL;
+ }
+#endif
return rte_flow_error_set(error, ENOMEM,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
NULL, "cannot create matcher");
cache_matcher->priority,
cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
rte_atomic32_read(&cache_matcher->refcnt));
+ rte_atomic32_inc(&tbl->refcnt);
return 0;
}
break;
case RTE_FLOW_ITEM_TYPE_IPV4:
flow_dv_translate_item_ipv4(match_mask, match_value,
- items, tunnel);
+ items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->dv.hash_fields |=
mlx5_flow_hashfields_adjust
break;
case RTE_FLOW_ITEM_TYPE_IPV6:
flow_dv_translate_item_ipv6(match_mask, match_value,
- items, tunnel);
+ items, tunnel, attr->group);
matcher.priority = MLX5_PRIORITY_MAP_L3;
dev_flow->dv.hash_fields |=
mlx5_flow_hashfields_adjust
matcher.priority = mlx5_flow_adjust_priority(dev, priority,
matcher.priority);
matcher.egress = attr->egress;
+ matcher.group = attr->group;
if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
return -rte_errno;
return 0;
struct mlx5_flow *flow)
{
struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
+ struct mlx5_priv *priv = dev->data->dev_private;
+ struct mlx5_flow_tbl_resource *tbl;
assert(matcher->matcher_object);
DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
claim_zero(mlx5_glue->dv_destroy_flow_matcher
(matcher->matcher_object));
LIST_REMOVE(matcher, next);
+ if (matcher->egress)
+ tbl = &priv->tx_tbl[matcher->group];
+ else
+ tbl = &priv->rx_tbl[matcher->group];
+ if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
+ mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
+ tbl->obj = NULL;
+ }
rte_free(matcher);
DRV_LOG(DEBUG, "port %u matcher %p: removed",
dev->data->port_id, (void *)matcher);
LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
dv = &dev_flow->dv;
if (dv->flow) {
- claim_zero(mlx5_glue->destroy_flow(dv->flow));
+ claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
dv->flow = NULL;
}
if (dv->hrxq) {
mlx5_glue_destroy_flow_action(void *action)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_action(action);
+#else
struct mlx5dv_flow_action_attr *attr = action;
int res = 0;
switch (attr->type) {
}
free(action);
return res;
+#endif
#else
(void)action;
return ENOTSUP;
return ibv_cq_ex_to_cq(cq);
}
+static void *
+mlx5_glue_dr_create_flow_tbl(void *ns, uint32_t level)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_ft(ns, level);
+#else
+ (void)ns;
+ (void)level;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_flow_tbl(void *tbl)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_ft(tbl);
+#else
+ (void)tbl;
+ return 0;
+#endif
+}
+
+static void *
+mlx5_glue_dr_create_ns(struct ibv_context *ctx,
+ enum mlx5dv_dr_ns_domain domain)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_ns(ctx, domain);
+#else
+ (void)ctx;
+ (void)domain;
+ return NULL;
+#endif
+}
+
+static int
+mlx5_glue_dr_destroy_ns(void *ns)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_ns(ns);
+#else
+ (void)ns;
+ return 0;
+#endif
+}
+
static struct ibv_cq_ex *
mlx5_glue_dv_create_cq(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
#endif
}
-static struct mlx5dv_flow_matcher *
+static void *
mlx5_glue_dv_create_flow_matcher(struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr)
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)context;
+ return mlx5dv_dr_create_matcher(tbl, matcher_attr->priority,
+ matcher_attr->match_criteria_enable,
+ matcher_attr->match_mask);
+#else
+ (void)tbl;
return mlx5dv_create_flow_matcher(context, matcher_attr);
+#endif
#else
(void)context;
(void)matcher_attr;
+ (void)tbl;
return NULL;
#endif
}
-static struct ibv_flow *
-mlx5_glue_dv_create_flow(struct mlx5dv_flow_matcher *matcher,
- struct mlx5dv_flow_match_parameters *match_value,
+static void *
+mlx5_glue_dv_create_flow(void *matcher,
+ void *match_value,
size_t num_actions,
void *actions[])
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_rule(matcher, match_value, num_actions,
+ (struct mlx5dv_dr_action **)actions);
+#else
struct mlx5dv_flow_action_attr actions_attr[8];
if (num_actions > 8)
*((struct mlx5dv_flow_action_attr *)(actions[i]));
return mlx5dv_create_flow(matcher, match_value,
num_actions, actions_attr);
+#endif
#else
(void)matcher;
(void)match_value;
#endif
}
-static int
-mlx5_glue_dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher)
-{
-#ifdef HAVE_IBV_FLOW_DV_SUPPORT
- return mlx5dv_destroy_flow_matcher(matcher);
-#else
- (void)matcher;
- return 0;
-#endif
-}
-
static void *
mlx5_glue_dv_create_flow_action_counter(void *counter_obj, uint32_t offset)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_devx_counter(counter_obj, offset);
+#else
struct mlx5dv_flow_action_attr *action;
(void)offset;
action->type = MLX5DV_FLOW_ACTION_COUNTERS_DEVX;
action->obj = counter_obj;
return action;
+#endif
#else
(void)counter_obj;
(void)offset;
mlx5_glue_dv_create_flow_action_dest_ibv_qp(void *qp)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_dest_ibv_qp(qp);
+#else
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
action->type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
action->obj = qp;
return action;
+#endif
#else
(void)qp;
return NULL;
static void *
mlx5_glue_dv_create_flow_action_modify_header
(struct ibv_context *ctx,
+ enum mlx5dv_flow_table_type ft_type,
+ void *ns, uint64_t flags,
size_t actions_sz,
- uint64_t actions[],
- enum mlx5dv_flow_table_type ft_type)
+ uint64_t actions[])
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_create_action_modify_header(ns, flags, actions_sz,
+ actions);
+#else
struct mlx5dv_flow_action_attr *action;
+ (void)ns;
+ (void)flags;
action = malloc(sizeof(*action));
if (!action)
return NULL;
action->action = mlx5dv_create_flow_action_modify_header
(ctx, actions_sz, actions, ft_type);
return action;
+#endif
#else
(void)ctx;
+ (void)ft_type;
+ (void)ns;
+ (void)flags;
(void)actions_sz;
(void)actions;
- (void)ft_type;
return NULL;
#endif
}
static void *
mlx5_glue_dv_create_flow_action_packet_reformat
(struct ibv_context *ctx,
- size_t data_sz,
- void *data,
enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type)
+ enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+ uint32_t flags, size_t data_sz, void *data)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ (void)ctx;
+ (void)ft_type;
+ return mlx5dv_dr_create_action_packet_reformat(ns, flags,
+ reformat_type, data_sz,
+ data);
+#else
+ (void)ns;
+ (void)flags;
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
action->action = mlx5dv_create_flow_action_packet_reformat
(ctx, data_sz, data, reformat_type, ft_type);
return action;
+#endif
#else
(void)ctx;
- (void)data_sz;
- (void)data;
(void)reformat_type;
(void)ft_type;
+ (void)ns;
+ (void)flags;
+ (void)data_sz;
+ (void)data;
return NULL;
#endif
}
mlx5_glue_dv_create_flow_action_tag(uint32_t tag)
{
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_create_action_tag(tag);
+#else
struct mlx5dv_flow_action_attr *action;
action = malloc(sizeof(*action));
if (!action)
action->type = MLX5DV_FLOW_ACTION_TAG;
action->tag_value = tag;
return action;
+#endif
#endif
(void)tag;
return NULL;
}
+static int
+mlx5_glue_dv_destroy_flow(void *flow_id)
+{
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_rule(flow_id);
+#else
+ return ibv_destroy_flow(flow_id);
+#endif
+}
+
+static int
+mlx5_glue_dv_destroy_flow_matcher(void *matcher)
+{
+#ifdef HAVE_IBV_FLOW_DV_SUPPORT
+#ifdef HAVE_MLX5DV_DR
+ return mlx5dv_dr_destroy_matcher(matcher);
+#else
+ return mlx5dv_destroy_flow_matcher(matcher);
+#endif
+#else
+ (void)matcher;
+ return 0;
+#endif
+}
+
static struct ibv_context *
mlx5_glue_dv_open_device(struct ibv_device *device)
{
.get_async_event = mlx5_glue_get_async_event,
.port_state_str = mlx5_glue_port_state_str,
.cq_ex_to_cq = mlx5_glue_cq_ex_to_cq,
+ .dr_create_flow_tbl = mlx5_glue_dr_create_flow_tbl,
+ .dr_destroy_flow_tbl = mlx5_glue_dr_destroy_flow_tbl,
+ .dr_create_ns = mlx5_glue_dr_create_ns,
+ .dr_destroy_ns = mlx5_glue_dr_destroy_ns,
.dv_create_cq = mlx5_glue_dv_create_cq,
.dv_create_wq = mlx5_glue_dv_create_wq,
.dv_query_device = mlx5_glue_dv_query_device,
.dv_init_obj = mlx5_glue_dv_init_obj,
.dv_create_qp = mlx5_glue_dv_create_qp,
.dv_create_flow_matcher = mlx5_glue_dv_create_flow_matcher,
- .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
.dv_create_flow = mlx5_glue_dv_create_flow,
.dv_create_flow_action_counter =
mlx5_glue_dv_create_flow_action_counter,
.dv_create_flow_action_packet_reformat =
mlx5_glue_dv_create_flow_action_packet_reformat,
.dv_create_flow_action_tag = mlx5_glue_dv_create_flow_action_tag,
+ .dv_destroy_flow = mlx5_glue_dv_destroy_flow,
+ .dv_destroy_flow_matcher = mlx5_glue_dv_destroy_flow_matcher,
.dv_open_device = mlx5_glue_dv_open_device,
.devx_obj_create = mlx5_glue_devx_obj_create,
.devx_obj_destroy = mlx5_glue_devx_obj_destroy,
struct mlx5dv_devx_obj;
#endif
+#ifndef HAVE_MLX5DV_DR
+struct mlx5dv_dr_ns;
+enum mlx5dv_dr_ns_domain { unused, };
+#endif
+
/* LIB_GLUE_VERSION must be updated every time this structure is modified. */
struct mlx5_glue {
const char *version;
struct ibv_async_event *event);
const char *(*port_state_str)(enum ibv_port_state port_state);
struct ibv_cq *(*cq_ex_to_cq)(struct ibv_cq_ex *cq);
+ void *(*dr_create_flow_tbl)(void *ns, uint32_t level);
+ int (*dr_destroy_flow_tbl)(void *tbl);
+ void *(*dr_create_ns)(struct ibv_context *ctx,
+ enum mlx5dv_dr_ns_domain domain);
+ int (*dr_destroy_ns)(void *ns);
struct ibv_cq_ex *(*dv_create_cq)
(struct ibv_context *context,
struct ibv_cq_init_attr_ex *cq_attr,
(struct ibv_context *context,
struct ibv_qp_init_attr_ex *qp_init_attr_ex,
struct mlx5dv_qp_init_attr *dv_qp_init_attr);
- struct mlx5dv_flow_matcher *(*dv_create_flow_matcher)
+ void *(*dv_create_flow_matcher)
(struct ibv_context *context,
- struct mlx5dv_flow_matcher_attr *matcher_attr);
- int (*dv_destroy_flow_matcher)(struct mlx5dv_flow_matcher *matcher);
- struct ibv_flow *(*dv_create_flow)(struct mlx5dv_flow_matcher *matcher,
- struct mlx5dv_flow_match_parameters *match_value,
+ struct mlx5dv_flow_matcher_attr *matcher_attr,
+ void *tbl);
+ void *(*dv_create_flow)(void *matcher, void *match_value,
size_t num_actions, void *actions[]);
void *(*dv_create_flow_action_counter)(void *obj, uint32_t offset);
void *(*dv_create_flow_action_dest_ibv_qp)(void *qp);
void *(*dv_create_flow_action_modify_header)
- (struct ibv_context *ctx, size_t actions_sz, uint64_t actions[],
- enum mlx5dv_flow_table_type ft_type);
+ (struct ibv_context *ctx, enum mlx5dv_flow_table_type ft_type,
+ void *ns, uint64_t flags, size_t actions_sz,
+ uint64_t actions[]);
void *(*dv_create_flow_action_packet_reformat)
- (struct ibv_context *ctx, size_t data_sz, void *data,
+ (struct ibv_context *ctx,
enum mlx5dv_flow_action_packet_reformat_type reformat_type,
- enum mlx5dv_flow_table_type ft_type);
+ enum mlx5dv_flow_table_type ft_type, struct mlx5dv_dr_ns *ns,
+ uint32_t flags, size_t data_sz, void *data);
void *(*dv_create_flow_action_tag)(uint32_t tag);
+ int (*dv_destroy_flow)(void *flow);
+ int (*dv_destroy_flow_matcher)(void *matcher);
struct ibv_context *(*dv_open_device)(struct ibv_device *device);
struct mlx5dv_devx_obj *(*devx_obj_create)
(struct ibv_context *ctx,
u8 reserved_at_1a0[0x60];
};
+struct mlx5_ifc_fte_match_set_misc3_bits {
+ u8 inner_tcp_seq_num[0x20];
+ u8 outer_tcp_seq_num[0x20];
+ u8 inner_tcp_ack_num[0x20];
+ u8 outer_tcp_ack_num[0x20];
+ u8 reserved_at_auto1[0x8];
+ u8 outer_vxlan_gpe_vni[0x18];
+ u8 outer_vxlan_gpe_next_protocol[0x8];
+ u8 outer_vxlan_gpe_flags[0x8];
+ u8 reserved_at_a8[0x10];
+ u8 icmp_header_data[0x20];
+ u8 icmpv6_header_data[0x20];
+ u8 icmp_type[0x8];
+ u8 icmp_code[0x8];
+ u8 icmpv6_type[0x8];
+ u8 icmpv6_code[0x8];
+ u8 reserved_at_1a0[0xe0];
+};
+
/* Flow matcher. */
struct mlx5_ifc_fte_match_param_bits {
struct mlx5_ifc_fte_match_set_lyr_2_4_bits outer_headers;
struct mlx5_ifc_fte_match_set_misc_bits misc_parameters;
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
struct mlx5_ifc_fte_match_set_misc2_bits misc_parameters_2;
- u8 reserved_at_800[0x800];
+ struct mlx5_ifc_fte_match_set_misc3_bits misc_parameters_3;
};
enum {
MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT,
MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT,
- MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT
+ MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT,
+ MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT
};
enum {