struct sfc_port port;
struct sfc_sw_stats sw_stats;
struct sfc_flow_rss flow_rss;
- /* Registry of tunnel offload contexts */
- struct sfc_flow_tunnel flow_tunnels[SFC_FT_MAX_NTUNNELS];
+ /* Registry of contexts used in Flow Tunnel (FT) offload */
+ struct sfc_ft_ctx ft_ctx_pool[SFC_FT_MAX_NTUNNELS];
struct sfc_filter filter;
struct sfc_mae mae;
struct sfc_repr_proxy repr_proxy;
return 0;
}
-int sfc_dp_ft_id_offset = -1;
-uint64_t sfc_dp_ft_id_valid;
+int sfc_dp_ft_ctx_id_offset = -1;
+uint64_t sfc_dp_ft_ctx_id_valid;
int
-sfc_dp_ft_id_register(void)
+sfc_dp_ft_ctx_id_register(void)
{
- static const struct rte_mbuf_dynfield ft_id = {
- .name = "rte_net_sfc_dynfield_ft_id",
+ static const struct rte_mbuf_dynfield ft_ctx_id = {
+ .name = "rte_net_sfc_dynfield_ft_ctx_id",
.size = sizeof(uint8_t),
.align = __alignof__(uint8_t),
};
- static const struct rte_mbuf_dynflag ft_id_valid = {
- .name = "rte_net_sfc_dynflag_ft_id_valid",
+ static const struct rte_mbuf_dynflag ft_ctx_id_valid = {
+ .name = "rte_net_sfc_dynflag_ft_ctx_id_valid",
};
int field_offset;
SFC_GENERIC_LOG(INFO, "%s() entry", __func__);
- if (sfc_dp_ft_id_valid != 0) {
+ if (sfc_dp_ft_ctx_id_valid != 0) {
SFC_GENERIC_LOG(INFO, "%s() already registered", __func__);
return 0;
}
- field_offset = rte_mbuf_dynfield_register(&ft_id);
+ field_offset = rte_mbuf_dynfield_register(&ft_ctx_id);
if (field_offset < 0) {
- SFC_GENERIC_LOG(ERR, "%s() failed to register ft_id dynfield",
+ SFC_GENERIC_LOG(ERR, "%s() failed to register ft_ctx_id dynfield",
__func__);
return -1;
}
- flag = rte_mbuf_dynflag_register(&ft_id_valid);
+ flag = rte_mbuf_dynflag_register(&ft_ctx_id_valid);
if (flag < 0) {
- SFC_GENERIC_LOG(ERR, "%s() failed to register ft_id dynflag",
+ SFC_GENERIC_LOG(ERR, "%s() failed to register ft_ctx_id dynflag",
__func__);
return -1;
}
- sfc_dp_ft_id_offset = field_offset;
- sfc_dp_ft_id_valid = UINT64_C(1) << flag;
+ sfc_dp_ft_ctx_id_offset = field_offset;
+ sfc_dp_ft_ctx_id_valid = UINT64_C(1) << flag;
SFC_GENERIC_LOG(INFO, "%s() done", __func__);
*/
int sfc_dp_mport_register(void);
-/** Dynamically registered mbuf "ft_id" validity flag (as a bitmask). */
-extern uint64_t sfc_dp_ft_id_valid;
+/** Dynamically registered mbuf "ft_ctx_id" validity flag (as a bitmask). */
+extern uint64_t sfc_dp_ft_ctx_id_valid;
-/** Dynamically registered mbuf field "ft_id" (mbuf byte offset). */
-extern int sfc_dp_ft_id_offset;
+/** Dynamically registered mbuf field "ft_ctx_id" (mbuf byte offset). */
+extern int sfc_dp_ft_ctx_id_offset;
-/** Register dynamic mbuf field "ft_id" and its validity flag. */
-int sfc_dp_ft_id_register(void);
+/** Register dynamic mbuf field "ft_ctx_id" and its validity flag. */
+int sfc_dp_ft_ctx_id_register(void);
#ifdef __cplusplus
}
}
if (rxq->flags & SFC_EF100_RXQ_USER_MARK) {
- uint8_t tunnel_mark;
+ uint8_t ft_ctx_mark;
uint32_t user_mark;
uint32_t mark;
m->hash.fdir.hi = user_mark;
}
- tunnel_mark = SFC_FT_GET_TUNNEL_MARK(mark);
- if (tunnel_mark != SFC_FT_TUNNEL_MARK_INVALID) {
- sfc_ft_id_t ft_id;
+ ft_ctx_mark = SFC_FT_FLOW_MARK_TO_CTX_MARK(mark);
+ if (ft_ctx_mark != SFC_FT_CTX_MARK_INVALID) {
+ sfc_ft_ctx_id_t ft_ctx_id;
- ft_id = SFC_FT_TUNNEL_MARK_TO_ID(tunnel_mark);
+ ft_ctx_id = SFC_FT_CTX_MARK_TO_CTX_ID(ft_ctx_mark);
- ol_flags |= sfc_dp_ft_id_valid;
- *RTE_MBUF_DYNFIELD(m, sfc_dp_ft_id_offset,
- sfc_ft_id_t *) = ft_id;
+ ol_flags |= sfc_dp_ft_ctx_id_valid;
+ *RTE_MBUF_DYNFIELD(m, sfc_dp_ft_ctx_id_offset,
+ sfc_ft_ctx_id_t *) = ft_ctx_id;
}
}
if ((sa->priv.dp_rx->features & SFC_DP_RX_FEAT_FLOW_MARK) != 0)
supported |= RTE_ETH_RX_METADATA_USER_MARK;
- if (sfc_flow_tunnel_is_supported(sa))
+ if (sfc_ft_is_supported(sa))
supported |= RTE_ETH_RX_METADATA_TUNNEL_ID;
sa->negotiated_rx_metadata = supported & *features;
uint32_t mark_max;
mark_max = encp->enc_filter_action_mark_max;
- if (sfc_flow_tunnel_is_active(sa))
+ if (sfc_ft_is_active(sa))
mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
if (mark == NULL || mark->id > mark_max)
int rc;
/*
- * If the flow is meant to be a JUMP rule in tunnel offload,
+ * If the flow is meant to be a TUNNEL rule in an FT context,
* preparse its actions and save its properties in spec_mae.
*/
- rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
+ rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
if (rc != 0)
goto fail;
if (rc != 0)
goto fail;
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
/*
* By design, this flow should be represented solely by the
* outer rule. But the HW/FW hasn't got support for setting
if (rc != 0)
goto fail;
- if (spec_mae->ft != NULL) {
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
- spec_mae->ft->jump_rule_is_set = B_TRUE;
+ if (spec_mae->ft_ctx != NULL) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
+ spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
- ++(spec_mae->ft->refcnt);
+ ++(spec_mae->ft_ctx->refcnt);
}
return 0;
fail:
/* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
- spec_mae->ft = NULL;
+ spec_mae->ft_ctx = NULL;
return rc;
}
.flush = sfc_flow_flush,
.query = sfc_flow_query,
.isolate = sfc_flow_isolate,
- .tunnel_decap_set = sfc_flow_tunnel_decap_set,
- .tunnel_match = sfc_flow_tunnel_match,
- .tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
- .tunnel_item_release = sfc_flow_tunnel_item_release,
- .get_restore_info = sfc_flow_tunnel_get_restore_info,
+ .tunnel_decap_set = sfc_ft_decap_set,
+ .tunnel_match = sfc_ft_match,
+ .tunnel_action_decap_release = sfc_ft_action_decap_release,
+ .tunnel_item_release = sfc_ft_item_release,
+ .get_restore_info = sfc_ft_get_restore_info,
.pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
};
SFC_ASSERT(sfc_adapter_is_locked(sa));
- sfc_flow_tunnel_reset_hit_counters(sa);
+ sfc_ft_counters_reset(sa);
TAILQ_FOREACH(flow, &sa->flow_list, entries) {
rc = sfc_flow_insert(sa, flow, NULL);
};
/* Indicates the role of a given flow in tunnel offload */
-enum sfc_flow_tunnel_rule_type {
+enum sfc_ft_rule_type {
/* The flow has nothing to do with tunnel offload */
SFC_FT_RULE_NONE = 0,
- /* The flow represents a JUMP rule */
- SFC_FT_RULE_JUMP,
- /* The flow represents a GROUP rule */
- SFC_FT_RULE_GROUP,
+ /* The flow is a TUNNEL rule, to match on an outer header */
+ SFC_FT_RULE_TUNNEL,
+ /*
+ * The flow is a SWITCH rule, to discard the outer header
+ * and dispatch the resulting packets to a vSwitch tenant
+ */
+ SFC_FT_RULE_SWITCH,
};
/* MAE-specific flow specification */
struct sfc_flow_spec_mae {
/* FLow Tunnel (FT) rule type (or NONE) */
- enum sfc_flow_tunnel_rule_type ft_rule_type;
+ enum sfc_ft_rule_type ft_rule_type;
/* Flow Tunnel (FT) context (or NULL) */
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
/* Desired priority level */
unsigned int priority;
/* Outer rule registry entry */
#include "sfc_mae.h"
bool
-sfc_flow_tunnel_is_supported(struct sfc_adapter *sa)
+sfc_ft_is_supported(struct sfc_adapter *sa)
{
SFC_ASSERT(sfc_adapter_is_locked(sa));
}
bool
-sfc_flow_tunnel_is_active(struct sfc_adapter *sa)
+sfc_ft_is_active(struct sfc_adapter *sa)
{
SFC_ASSERT(sfc_adapter_is_locked(sa));
RTE_ETH_RX_METADATA_TUNNEL_ID) != 0);
}
-struct sfc_flow_tunnel *
-sfc_flow_tunnel_pick(struct sfc_adapter *sa, uint32_t ft_mark)
+struct sfc_ft_ctx *
+sfc_ft_ctx_pick(struct sfc_adapter *sa, uint32_t flow_mark)
{
- uint32_t tunnel_mark = SFC_FT_GET_TUNNEL_MARK(ft_mark);
+ uint8_t ft_ctx_mark = SFC_FT_FLOW_MARK_TO_CTX_MARK(flow_mark);
SFC_ASSERT(sfc_adapter_is_locked(sa));
- if (tunnel_mark != SFC_FT_TUNNEL_MARK_INVALID) {
- sfc_ft_id_t ft_id = SFC_FT_TUNNEL_MARK_TO_ID(tunnel_mark);
- struct sfc_flow_tunnel *ft = &sa->flow_tunnels[ft_id];
+ if (ft_ctx_mark != SFC_FT_CTX_MARK_INVALID) {
+ sfc_ft_ctx_id_t ft_ctx_id = SFC_FT_CTX_MARK_TO_CTX_ID(ft_ctx_mark);
+ struct sfc_ft_ctx *ft_ctx = &sa->ft_ctx_pool[ft_ctx_id];
- ft->id = ft_id;
+ ft_ctx->id = ft_ctx_id;
- return ft;
+ return ft_ctx;
}
return NULL;
}
int
-sfc_flow_tunnel_detect_jump_rule(struct sfc_adapter *sa,
- const struct rte_flow_action *actions,
- struct sfc_flow_spec_mae *spec,
- struct rte_flow_error *error)
+sfc_ft_tunnel_rule_detect(struct sfc_adapter *sa,
+ const struct rte_flow_action *actions,
+ struct sfc_flow_spec_mae *spec,
+ struct rte_flow_error *error)
{
const struct rte_flow_action_mark *action_mark = NULL;
const struct rte_flow_action_jump *action_jump = NULL;
- struct sfc_flow_tunnel *ft;
- uint32_t ft_mark = 0;
+ struct sfc_ft_ctx *ft_ctx;
+ uint32_t flow_mark = 0;
int rc = 0;
SFC_ASSERT(sfc_adapter_is_locked(sa));
- if (!sfc_flow_tunnel_is_active(sa)) {
+ if (!sfc_ft_is_active(sa)) {
/* Tunnel-related actions (if any) will be turned down later. */
return 0;
}
case RTE_FLOW_ACTION_TYPE_MARK:
if (action_mark == NULL) {
action_mark = actions->conf;
- ft_mark = action_mark->id;
+ flow_mark = action_mark->id;
} else {
rc = EINVAL;
}
}
}
- ft = sfc_flow_tunnel_pick(sa, ft_mark);
- if (ft != NULL && action_jump != 0) {
- sfc_dbg(sa, "tunnel offload: JUMP: detected");
+ ft_ctx = sfc_ft_ctx_pick(sa, flow_mark);
+ if (ft_ctx != NULL && action_jump != 0) {
+ sfc_dbg(sa, "FT: TUNNEL: detected");
if (rc != 0) {
/* The loop above might have spotted wrong actions. */
- sfc_err(sa, "tunnel offload: JUMP: invalid actions: %s",
+ sfc_err(sa, "FT: TUNNEL: invalid actions: %s",
strerror(rc));
goto fail;
}
- if (ft->refcnt == 0) {
- sfc_err(sa, "tunnel offload: JUMP: tunnel=%u does not exist",
- ft->id);
+ if (ft_ctx->refcnt == 0) {
+ sfc_err(sa, "FT: TUNNEL: inactive context (ID=%u)",
+ ft_ctx->id);
rc = ENOENT;
goto fail;
}
- if (ft->jump_rule_is_set) {
- sfc_err(sa, "tunnel offload: JUMP: already exists in tunnel=%u",
- ft->id);
+ if (ft_ctx->tunnel_rule_is_set) {
+ sfc_err(sa, "FT: TUNNEL: already setup context (ID=%u)",
+ ft_ctx->id);
rc = EEXIST;
goto fail;
}
- spec->ft_rule_type = SFC_FT_RULE_JUMP;
- spec->ft = ft;
+ spec->ft_rule_type = SFC_FT_RULE_TUNNEL;
+ spec->ft_ctx = ft_ctx;
}
return 0;
fail:
return rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: JUMP: preparsing failed");
+ "FT: TUNNEL: preparsing failed");
}
static int
-sfc_flow_tunnel_attach(struct sfc_adapter *sa,
- struct rte_flow_tunnel *tunnel,
- struct sfc_flow_tunnel **ftp)
+sfc_ft_ctx_attach(struct sfc_adapter *sa, const struct rte_flow_tunnel *tunnel,
+ struct sfc_ft_ctx **ft_ctxp)
{
- struct sfc_flow_tunnel *ft;
- const char *ft_status;
- int ft_id_free = -1;
- sfc_ft_id_t ft_id;
+ sfc_ft_ctx_id_t ft_ctx_id;
+ struct sfc_ft_ctx *ft_ctx;
+ const char *ft_ctx_status;
+ int ft_ctx_id_free = -1;
int rc;
SFC_ASSERT(sfc_adapter_is_locked(sa));
- rc = sfc_dp_ft_id_register();
+ rc = sfc_dp_ft_ctx_id_register();
if (rc != 0)
return rc;
if (tunnel->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
- sfc_err(sa, "tunnel offload: unsupported tunnel (encapsulation) type");
+ sfc_err(sa, "FT: unsupported tunnel (encapsulation) type");
return ENOTSUP;
}
- for (ft_id = 0; ft_id < SFC_FT_MAX_NTUNNELS; ++ft_id) {
- ft = &sa->flow_tunnels[ft_id];
+ for (ft_ctx_id = 0; ft_ctx_id < SFC_FT_MAX_NTUNNELS; ++ft_ctx_id) {
+ ft_ctx = &sa->ft_ctx_pool[ft_ctx_id];
- if (ft->refcnt == 0) {
- if (ft_id_free == -1)
- ft_id_free = ft_id;
+ if (ft_ctx->refcnt == 0) {
+ if (ft_ctx_id_free == -1)
+ ft_ctx_id_free = ft_ctx_id;
continue;
}
- if (memcmp(tunnel, &ft->rte_tunnel, sizeof(*tunnel)) == 0) {
- ft_status = "existing";
+ if (memcmp(tunnel, &ft_ctx->tunnel, sizeof(*tunnel)) == 0) {
+ ft_ctx_status = "existing";
goto attach;
}
}
- if (ft_id_free == -1) {
- sfc_err(sa, "tunnel offload: no free slot for the new tunnel");
+ if (ft_ctx_id_free == -1) {
+ sfc_err(sa, "FT: no free slot for the new context");
return ENOBUFS;
}
- ft_id = ft_id_free;
- ft = &sa->flow_tunnels[ft_id];
+ ft_ctx_id = ft_ctx_id_free;
+ ft_ctx = &sa->ft_ctx_pool[ft_ctx_id];
- memcpy(&ft->rte_tunnel, tunnel, sizeof(*tunnel));
+ memcpy(&ft_ctx->tunnel, tunnel, sizeof(*tunnel));
- ft->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ ft_ctx->encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
- ft->action_mark.id = SFC_FT_ID_TO_MARK(ft_id_free);
- ft->action.type = RTE_FLOW_ACTION_TYPE_MARK;
- ft->action.conf = &ft->action_mark;
+ ft_ctx->action_mark.id = SFC_FT_CTX_ID_TO_FLOW_MARK(ft_ctx_id);
+ ft_ctx->action.type = RTE_FLOW_ACTION_TYPE_MARK;
+ ft_ctx->action.conf = &ft_ctx->action_mark;
- ft->item.type = RTE_FLOW_ITEM_TYPE_MARK;
- ft->item_mark_v.id = ft->action_mark.id;
- ft->item.spec = &ft->item_mark_v;
- ft->item.mask = &ft->item_mark_m;
- ft->item_mark_m.id = UINT32_MAX;
+ ft_ctx->item_mark_v.id = ft_ctx->action_mark.id;
+ ft_ctx->item.type = RTE_FLOW_ITEM_TYPE_MARK;
+ ft_ctx->item.spec = &ft_ctx->item_mark_v;
+ ft_ctx->item.mask = &ft_ctx->item_mark_m;
+ ft_ctx->item_mark_m.id = UINT32_MAX;
- ft->jump_rule_is_set = B_FALSE;
+ ft_ctx->tunnel_rule_is_set = B_FALSE;
- ft->refcnt = 0;
+ ft_ctx->refcnt = 0;
- ft_status = "newly added";
+ ft_ctx_status = "newly added";
attach:
- sfc_dbg(sa, "tunnel offload: attaching to %s tunnel=%u",
- ft_status, ft_id);
+ sfc_dbg(sa, "FT: attaching to %s context (ID=%u)",
+ ft_ctx_status, ft_ctx_id);
- ++(ft->refcnt);
- *ftp = ft;
+ ++(ft_ctx->refcnt);
+ *ft_ctxp = ft_ctx;
return 0;
}
static int
-sfc_flow_tunnel_detach(struct sfc_adapter *sa,
- uint32_t ft_mark)
+sfc_ft_ctx_detach(struct sfc_adapter *sa, uint32_t flow_mark)
{
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
SFC_ASSERT(sfc_adapter_is_locked(sa));
- ft = sfc_flow_tunnel_pick(sa, ft_mark);
- if (ft == NULL) {
- sfc_err(sa, "tunnel offload: invalid tunnel");
+ ft_ctx = sfc_ft_ctx_pick(sa, flow_mark);
+ if (ft_ctx == NULL) {
+ sfc_err(sa, "FT: invalid context");
return EINVAL;
}
- if (ft->refcnt == 0) {
- sfc_err(sa, "tunnel offload: tunnel=%u does not exist", ft->id);
+ if (ft_ctx->refcnt == 0) {
+ sfc_err(sa, "FT: inactive context (ID=%u)", ft_ctx->id);
return ENOENT;
}
- --(ft->refcnt);
+ --(ft_ctx->refcnt);
return 0;
}
int
-sfc_flow_tunnel_decap_set(struct rte_eth_dev *dev,
- struct rte_flow_tunnel *tunnel,
- struct rte_flow_action **pmd_actions,
- uint32_t *num_of_actions,
- struct rte_flow_error *err)
+sfc_ft_decap_set(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel,
+ struct rte_flow_action **pmd_actions, uint32_t *num_of_actions,
+ struct rte_flow_error *err)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
int rc;
sfc_adapter_lock(sa);
- if (!sfc_flow_tunnel_is_active(sa)) {
+ if (!sfc_ft_is_active(sa)) {
rc = ENOTSUP;
goto fail;
}
- rc = sfc_flow_tunnel_attach(sa, tunnel, &ft);
+ rc = sfc_ft_ctx_attach(sa, tunnel, &ft_ctx);
if (rc != 0)
goto fail;
- *pmd_actions = &ft->action;
+ *pmd_actions = &ft_ctx->action;
*num_of_actions = 1;
sfc_adapter_unlock(sa);
return rte_flow_error_set(err, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: decap_set failed");
+ "FT: decap_set failed");
}
int
-sfc_flow_tunnel_match(struct rte_eth_dev *dev,
- struct rte_flow_tunnel *tunnel,
- struct rte_flow_item **pmd_items,
- uint32_t *num_of_items,
- struct rte_flow_error *err)
+sfc_ft_match(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel,
+ struct rte_flow_item **pmd_items, uint32_t *num_of_items,
+ struct rte_flow_error *err)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
int rc;
sfc_adapter_lock(sa);
- if (!sfc_flow_tunnel_is_active(sa)) {
+ if (!sfc_ft_is_active(sa)) {
rc = ENOTSUP;
goto fail;
}
- rc = sfc_flow_tunnel_attach(sa, tunnel, &ft);
+ rc = sfc_ft_ctx_attach(sa, tunnel, &ft_ctx);
if (rc != 0)
goto fail;
- *pmd_items = &ft->item;
+ *pmd_items = &ft_ctx->item;
*num_of_items = 1;
sfc_adapter_unlock(sa);
return rte_flow_error_set(err, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: tunnel_match failed");
+ "FT: tunnel_match failed");
}
int
-sfc_flow_tunnel_item_release(struct rte_eth_dev *dev,
- struct rte_flow_item *pmd_items,
- uint32_t num_items,
- struct rte_flow_error *err)
+sfc_ft_item_release(struct rte_eth_dev *dev, struct rte_flow_item *pmd_items,
+ uint32_t num_items, struct rte_flow_error *err)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
const struct rte_flow_item_mark *item_mark;
sfc_adapter_lock(sa);
- if (!sfc_flow_tunnel_is_active(sa)) {
+ if (!sfc_ft_is_active(sa)) {
rc = ENOTSUP;
goto fail;
}
if (num_items != 1 || item == NULL || item->spec == NULL ||
item->type != RTE_FLOW_ITEM_TYPE_MARK) {
- sfc_err(sa, "tunnel offload: item_release: wrong input");
+ sfc_err(sa, "FT: item_release: wrong input");
rc = EINVAL;
goto fail;
}
item_mark = item->spec;
- rc = sfc_flow_tunnel_detach(sa, item_mark->id);
+ rc = sfc_ft_ctx_detach(sa, item_mark->id);
if (rc != 0)
goto fail;
return rte_flow_error_set(err, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: item_release failed");
+ "FT: item_release failed");
}
int
-sfc_flow_tunnel_action_decap_release(struct rte_eth_dev *dev,
- struct rte_flow_action *pmd_actions,
- uint32_t num_actions,
- struct rte_flow_error *err)
+sfc_ft_action_decap_release(struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions, struct rte_flow_error *err)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
const struct rte_flow_action_mark *action_mark;
sfc_adapter_lock(sa);
- if (!sfc_flow_tunnel_is_active(sa)) {
+ if (!sfc_ft_is_active(sa)) {
rc = ENOTSUP;
goto fail;
}
if (num_actions != 1 || action == NULL || action->conf == NULL ||
action->type != RTE_FLOW_ACTION_TYPE_MARK) {
- sfc_err(sa, "tunnel offload: action_decap_release: wrong input");
+ sfc_err(sa, "FT: action_decap_release: wrong input");
rc = EINVAL;
goto fail;
}
action_mark = action->conf;
- rc = sfc_flow_tunnel_detach(sa, action_mark->id);
+ rc = sfc_ft_ctx_detach(sa, action_mark->id);
if (rc != 0)
goto fail;
return rte_flow_error_set(err, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: item_release failed");
+ "FT: item_release failed");
}
int
-sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
- struct rte_mbuf *m,
- struct rte_flow_restore_info *info,
- struct rte_flow_error *err)
+sfc_ft_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m,
+ struct rte_flow_restore_info *info,
+ struct rte_flow_error *err)
{
struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
- const struct sfc_flow_tunnel *ft;
- sfc_ft_id_t ft_id;
+ const struct sfc_ft_ctx *ft_ctx;
+ sfc_ft_ctx_id_t ft_ctx_id;
int rc;
sfc_adapter_lock(sa);
- if ((m->ol_flags & sfc_dp_ft_id_valid) == 0) {
- sfc_dbg(sa, "tunnel offload: get_restore_info: no tunnel mark in the packet");
+ if ((m->ol_flags & sfc_dp_ft_ctx_id_valid) == 0) {
+ sfc_dbg(sa, "FT: get_restore_info: no FT context mark in the packet");
rc = EINVAL;
goto fail;
}
- ft_id = *RTE_MBUF_DYNFIELD(m, sfc_dp_ft_id_offset, sfc_ft_id_t *);
- ft = &sa->flow_tunnels[ft_id];
+ ft_ctx_id = *RTE_MBUF_DYNFIELD(m, sfc_dp_ft_ctx_id_offset,
+ sfc_ft_ctx_id_t *);
+ ft_ctx = &sa->ft_ctx_pool[ft_ctx_id];
- if (ft->refcnt == 0) {
- sfc_dbg(sa, "tunnel offload: get_restore_info: tunnel=%u does not exist",
- ft_id);
+ if (ft_ctx->refcnt == 0) {
+ sfc_dbg(sa, "FT: get_restore_info: inactive context (ID=%u)",
+ ft_ctx_id);
rc = ENOENT;
goto fail;
}
- memcpy(&info->tunnel, &ft->rte_tunnel, sizeof(info->tunnel));
+ memcpy(&info->tunnel, &ft_ctx->tunnel, sizeof(info->tunnel));
/*
- * The packet still has encapsulation header; JUMP rules never
+ * The packet still has its encapsulation header; TUNNEL rules never
* strip it. Therefore, set RTE_FLOW_RESTORE_INFO_ENCAPSULATED.
*/
info->flags = RTE_FLOW_RESTORE_INFO_ENCAPSULATED |
return rte_flow_error_set(err, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: get_restore_info failed");
+ "FT: get_restore_info failed");
}
void
-sfc_flow_tunnel_reset_hit_counters(struct sfc_adapter *sa)
+sfc_ft_counters_reset(struct sfc_adapter *sa)
{
unsigned int i;
SFC_ASSERT(sfc_adapter_is_locked(sa));
SFC_ASSERT(sa->state != SFC_ETHDEV_STARTED);
- for (i = 0; i < RTE_DIM(sa->flow_tunnels); ++i) {
- struct sfc_flow_tunnel *ft = &sa->flow_tunnels[i];
+ for (i = 0; i < RTE_DIM(sa->ft_ctx_pool); ++i) {
+ struct sfc_ft_ctx *ft_ctx = &sa->ft_ctx_pool[i];
- ft->reset_jump_hit_counter = 0;
- ft->group_hit_counter = 0;
+ ft_ctx->reset_tunnel_hit_counter = 0;
+ ft_ctx->switch_hit_counter = 0;
}
}
#endif
/** Flow Tunnel (FT) SW entry ID */
-typedef uint8_t sfc_ft_id_t;
+typedef uint8_t sfc_ft_ctx_id_t;
-#define SFC_FT_TUNNEL_MARK_BITS \
- (sizeof(sfc_ft_id_t) * CHAR_BIT)
+#define SFC_FT_CTX_MARK_BITS \
+ (sizeof(sfc_ft_ctx_id_t) * CHAR_BIT)
#define SFC_FT_USER_MARK_BITS \
- (sizeof(uint32_t) * CHAR_BIT - SFC_FT_TUNNEL_MARK_BITS)
+ (sizeof(uint32_t) * CHAR_BIT - SFC_FT_CTX_MARK_BITS)
#define SFC_FT_USER_MARK_MASK \
RTE_LEN2MASK(SFC_FT_USER_MARK_BITS, uint32_t)
-#define SFC_FT_GET_TUNNEL_MARK(_mark) \
- ((_mark) >> SFC_FT_USER_MARK_BITS)
+#define SFC_FT_FLOW_MARK_TO_CTX_MARK(_flow_mark) \
+ ((_flow_mark) >> SFC_FT_USER_MARK_BITS)
-#define SFC_FT_TUNNEL_MARK_INVALID (0)
+#define SFC_FT_CTX_MARK_INVALID (0)
-#define SFC_FT_TUNNEL_MARK_TO_ID(_tunnel_mark) \
- ((_tunnel_mark) - 1)
+#define SFC_FT_CTX_MARK_TO_CTX_ID(_ctx_mark) \
+ ((_ctx_mark) - 1)
-#define SFC_FT_ID_TO_TUNNEL_MARK(_id) \
- ((_id) + 1)
+#define SFC_FT_CTX_ID_TO_CTX_MARK(_ctx_id) \
+ ((_ctx_id) + 1)
-#define SFC_FT_ID_TO_MARK(_id) \
- (SFC_FT_ID_TO_TUNNEL_MARK(_id) << SFC_FT_USER_MARK_BITS)
+#define SFC_FT_CTX_ID_TO_FLOW_MARK(_ctx_id) \
+ (SFC_FT_CTX_ID_TO_CTX_MARK(_ctx_id) << SFC_FT_USER_MARK_BITS)
-#define SFC_FT_GET_USER_MARK(_mark) \
- ((_mark) & SFC_FT_USER_MARK_MASK)
+#define SFC_FT_FLOW_MARK_TO_USER_MARK(_flow_mark) \
+ ((_flow_mark) & SFC_FT_USER_MARK_MASK)
#define SFC_FT_MAX_NTUNNELS \
- (RTE_LEN2MASK(SFC_FT_TUNNEL_MARK_BITS, uint8_t) - 1)
+ (RTE_LEN2MASK(SFC_FT_CTX_MARK_BITS, uint8_t) - 1)
-struct sfc_flow_tunnel {
- bool jump_rule_is_set;
+struct sfc_ft_ctx {
+ bool tunnel_rule_is_set;
efx_tunnel_protocol_t encap_type;
- struct rte_flow_tunnel rte_tunnel;
+ struct rte_flow_tunnel tunnel;
unsigned int refcnt;
- sfc_ft_id_t id;
+ sfc_ft_ctx_id_t id;
struct rte_flow_action_mark action_mark;
struct rte_flow_action action;
struct rte_flow_item_mark item_mark_m;
struct rte_flow_item item;
- uint64_t reset_jump_hit_counter;
- uint64_t group_hit_counter;
+ uint64_t reset_tunnel_hit_counter;
+ uint64_t switch_hit_counter;
};
struct sfc_adapter;
-bool sfc_flow_tunnel_is_supported(struct sfc_adapter *sa);
+bool sfc_ft_is_supported(struct sfc_adapter *sa);
-bool sfc_flow_tunnel_is_active(struct sfc_adapter *sa);
+bool sfc_ft_is_active(struct sfc_adapter *sa);
-struct sfc_flow_tunnel *sfc_flow_tunnel_pick(struct sfc_adapter *sa,
- uint32_t ft_mark);
+struct sfc_ft_ctx *sfc_ft_ctx_pick(struct sfc_adapter *sa, uint32_t flow_mark);
-int sfc_flow_tunnel_detect_jump_rule(struct sfc_adapter *sa,
- const struct rte_flow_action *actions,
- struct sfc_flow_spec_mae *spec,
- struct rte_flow_error *error);
+int sfc_ft_tunnel_rule_detect(struct sfc_adapter *sa,
+ const struct rte_flow_action *actions,
+ struct sfc_flow_spec_mae *spec,
+ struct rte_flow_error *error);
-int sfc_flow_tunnel_decap_set(struct rte_eth_dev *dev,
- struct rte_flow_tunnel *tunnel,
- struct rte_flow_action **pmd_actions,
- uint32_t *num_of_actions,
- struct rte_flow_error *err);
+int sfc_ft_decap_set(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel,
+ struct rte_flow_action **pmd_actions,
+ uint32_t *num_of_actions, struct rte_flow_error *err);
-int sfc_flow_tunnel_match(struct rte_eth_dev *dev,
- struct rte_flow_tunnel *tunnel,
- struct rte_flow_item **pmd_items,
- uint32_t *num_of_items,
- struct rte_flow_error *err);
+int sfc_ft_match(struct rte_eth_dev *dev, struct rte_flow_tunnel *tunnel,
+ struct rte_flow_item **pmd_items, uint32_t *num_of_items,
+ struct rte_flow_error *err);
-int sfc_flow_tunnel_item_release(struct rte_eth_dev *dev,
- struct rte_flow_item *pmd_items,
- uint32_t num_items,
- struct rte_flow_error *err);
+int sfc_ft_item_release(struct rte_eth_dev *dev,
+ struct rte_flow_item *pmd_items, uint32_t num_items,
+ struct rte_flow_error *err);
-int sfc_flow_tunnel_action_decap_release(struct rte_eth_dev *dev,
- struct rte_flow_action *pmd_actions,
- uint32_t num_actions,
- struct rte_flow_error *err);
+int sfc_ft_action_decap_release(struct rte_eth_dev *dev,
+ struct rte_flow_action *pmd_actions,
+ uint32_t num_actions,
+ struct rte_flow_error *err);
-int sfc_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
- struct rte_mbuf *m,
- struct rte_flow_restore_info *info,
- struct rte_flow_error *err);
+int sfc_ft_get_restore_info(struct rte_eth_dev *dev, struct rte_mbuf *m,
+ struct rte_flow_restore_info *info,
+ struct rte_flow_error *err);
-void sfc_flow_tunnel_reset_hit_counters(struct sfc_adapter *sa);
+void sfc_ft_counters_reset(struct sfc_adapter *sa);
#ifdef __cplusplus
}
}
struct sfc_mae_aset_ctx {
- uint64_t *ft_group_hit_counter;
+ uint64_t *ft_switch_hit_counter;
+ struct sfc_ft_ctx *counter_ft_ctx;
struct sfc_mae_encap_header *encap_header;
- struct sfc_flow_tunnel *counter_ft;
unsigned int n_counters;
struct sfc_mae_mac_addr *dst_mac;
struct sfc_mae_mac_addr *src_mac;
action_set->counters[i].mae_id.id =
EFX_MAE_RSRC_ID_INVALID;
- action_set->counters[i].ft_group_hit_counter =
- ctx->ft_group_hit_counter;
- action_set->counters[i].ft = ctx->counter_ft;
+ action_set->counters[i].ft_ctx = ctx->counter_ft_ctx;
+ action_set->counters[i].ft_switch_hit_counter =
+ ctx->ft_switch_hit_counter;
}
for (action = actions, i = 0;
spec_mae = &flow->spec.mae;
- if (spec_mae->ft != NULL) {
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
- spec_mae->ft->jump_rule_is_set = B_FALSE;
+ if (spec_mae->ft_ctx != NULL) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
+ spec_mae->ft_ctx->tunnel_rule_is_set = B_FALSE;
- SFC_ASSERT(spec_mae->ft->refcnt != 0);
- --(spec_mae->ft->refcnt);
+ SFC_ASSERT(spec_mae->ft_ctx->refcnt != 0);
+ --(spec_mae->ft_ctx->refcnt);
}
SFC_ASSERT(spec_mae->rule_id.id == EFX_MAE_RSRC_ID_INVALID);
{
const struct rte_flow_item_mark *spec = item->spec;
struct sfc_mae_parse_ctx *ctx_mae = ctx->mae;
+ struct sfc_ft_ctx *ft_ctx = ctx_mae->ft_ctx;
if (spec == NULL) {
return rte_flow_error_set(error, EINVAL,
* way, sfc_mae_rule_preparse_item_mark() must have
* already parsed it. Only one item MARK is allowed.
*/
- if (ctx_mae->ft_rule_type != SFC_FT_RULE_GROUP ||
- spec->id != (uint32_t)SFC_FT_ID_TO_MARK(ctx_mae->ft->id)) {
+ if (ctx_mae->ft_rule_type != SFC_FT_RULE_SWITCH ||
+ spec->id != (uint32_t)SFC_FT_CTX_ID_TO_FLOW_MARK(ft_ctx->id)) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "invalid item MARK");
if (rc != 0)
return rc;
- if (ctx_mae->ft_rule_type == SFC_FT_RULE_JUMP && mask != NULL) {
+ if (ctx_mae->ft_rule_type == SFC_FT_RULE_TUNNEL && mask != NULL) {
/*
* The HW/FW hasn't got support for match on MAC addresses in
* outer rules yet (this will change). Match on VLAN presence
const uint8_t *mask = NULL;
int rc;
- if (ctx_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
+ if (ctx_mae->ft_rule_type == SFC_FT_RULE_SWITCH) {
/*
* As a workaround, pattern processing has started from
* this (tunnel) item. No pattern data to process yet.
switch (ctx->ft_rule_type) {
case SFC_FT_RULE_NONE:
break;
- case SFC_FT_RULE_JUMP:
+ case SFC_FT_RULE_TUNNEL:
/* No action rule */
return 0;
- case SFC_FT_RULE_GROUP:
+ case SFC_FT_RULE_SWITCH:
/*
* Match on recirculation ID rather than
* on the outer rule allocation handle.
*/
rc = efx_mae_match_spec_recirc_id_set(ctx->match_spec_action,
- SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id));
+ SFC_FT_CTX_ID_TO_CTX_MARK(ctx->ft_ctx->id));
if (rc != 0) {
return rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "tunnel offload: GROUP: AR: failed to request match on RECIRC_ID");
+ "FT: SWITCH: AR: failed to request match on RECIRC_ID");
}
return 0;
default:
sfc_mae_rule_preparse_item_mark(const struct rte_flow_item_mark *spec,
struct sfc_mae_parse_ctx *ctx)
{
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
uint32_t user_mark;
if (spec == NULL) {
- sfc_err(ctx->sa, "tunnel offload: GROUP: NULL spec in item MARK");
+ sfc_err(ctx->sa, "FT: SWITCH: NULL spec in item MARK");
return EINVAL;
}
- ft = sfc_flow_tunnel_pick(ctx->sa, spec->id);
- if (ft == NULL) {
- sfc_err(ctx->sa, "tunnel offload: GROUP: invalid tunnel");
+ ft_ctx = sfc_ft_ctx_pick(ctx->sa, spec->id);
+ if (ft_ctx == NULL) {
+ sfc_err(ctx->sa, "FT: SWITCH: invalid context");
return EINVAL;
}
- if (ft->refcnt == 0) {
- sfc_err(ctx->sa, "tunnel offload: GROUP: tunnel=%u does not exist",
- ft->id);
+ if (ft_ctx->refcnt == 0) {
+ sfc_err(ctx->sa, "FT: SWITCH: inactive context (ID=%u)",
+ ft_ctx->id);
return ENOENT;
}
- user_mark = SFC_FT_GET_USER_MARK(spec->id);
+ user_mark = SFC_FT_FLOW_MARK_TO_USER_MARK(spec->id);
if (user_mark != 0) {
- sfc_err(ctx->sa, "tunnel offload: GROUP: invalid item MARK");
+ sfc_err(ctx->sa, "FT: SWITCH: invalid item MARK");
return EINVAL;
}
- sfc_dbg(ctx->sa, "tunnel offload: GROUP: detected");
+ sfc_dbg(ctx->sa, "FT: SWITCH: detected");
- ctx->ft_rule_type = SFC_FT_RULE_GROUP;
- ctx->ft = ft;
+ ctx->ft_rule_type = SFC_FT_RULE_SWITCH;
+ ctx->ft_ctx = ft_ctx;
return 0;
}
if (rc != 0) {
return rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "tunnel offload: GROUP: invalid item MARK");
+ pattern, "FT: SWITCH: invalid item MARK");
}
++pattern;
continue;
if (pattern->type == RTE_FLOW_ITEM_TYPE_END)
return 0;
break;
- case SFC_FT_RULE_JUMP:
+ case SFC_FT_RULE_TUNNEL:
if (pattern->type != RTE_FLOW_ITEM_TYPE_END) {
return rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "tunnel offload: JUMP: invalid item");
+ pattern, "FT: TUNNEL: invalid item");
}
- ctx->encap_type = ctx->ft->encap_type;
+ ctx->encap_type = ctx->ft_ctx->encap_type;
break;
- case SFC_FT_RULE_GROUP:
+ case SFC_FT_RULE_SWITCH:
if (pattern->type == RTE_FLOW_ITEM_TYPE_END) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- NULL, "tunnel offload: GROUP: missing tunnel item");
- } else if (ctx->encap_type != ctx->ft->encap_type) {
+ NULL, "FT: SWITCH: missing tunnel item");
+ } else if (ctx->encap_type != ctx->ft_ctx->encap_type) {
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
- pattern, "tunnel offload: GROUP: tunnel type mismatch");
+ pattern, "FT: SWITCH: tunnel type mismatch");
}
/*
}
switch (ctx->ft_rule_type) {
- case SFC_FT_RULE_JUMP:
- recirc_id = SFC_FT_ID_TO_TUNNEL_MARK(ctx->ft->id);
+ case SFC_FT_RULE_TUNNEL:
+ recirc_id = SFC_FT_CTX_ID_TO_CTX_MARK(ctx->ft_ctx->id);
/* FALLTHROUGH */
case SFC_FT_RULE_NONE:
if (ctx->priority >= mae->nb_outer_rule_prios_max) {
"OR: failed to initialise RECIRC_ID");
}
break;
- case SFC_FT_RULE_GROUP:
+ case SFC_FT_RULE_SWITCH:
/* Outermost items -> "ENC" match fields in the action rule. */
ctx->field_ids_remap = field_ids_remap_to_encap;
ctx->match_spec = ctx->match_spec_action;
- /* No own outer rule; match on JUMP OR's RECIRC_ID is used. */
+ /* No own outer rule; match on TUNNEL OR's RECIRC_ID is used. */
ctx->encap_type = EFX_TUNNEL_PROTOCOL_NONE;
break;
default:
memset(&ctx_mae, 0, sizeof(ctx_mae));
ctx_mae.ft_rule_type = spec->ft_rule_type;
ctx_mae.priority = spec->priority;
- ctx_mae.ft = spec->ft;
+ ctx_mae.ft_ctx = spec->ft_ctx;
ctx_mae.sa = sa;
switch (ctx_mae.ft_rule_type) {
- case SFC_FT_RULE_JUMP:
+ case SFC_FT_RULE_TUNNEL:
/*
* By design, this flow should be represented solely by the
* outer rule. But the HW/FW hasn't got support for setting
priority_shift = 1;
/* FALLTHROUGH */
- case SFC_FT_RULE_GROUP:
+ case SFC_FT_RULE_SWITCH:
if (ctx_mae.priority != 0) {
/*
- * Because of the above workaround, deny the
- * use of priorities to JUMP and GROUP rules.
+ * Because of the above workaround, deny the use
+ * of priorities to TUNNEL and SWITCH rules.
*/
rc = rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, NULL,
- "tunnel offload: priorities are not supported");
+ "FT: priorities are not supported");
goto fail_priority_check;
}
/*
* sfc_mae_rule_encap_parse_init() may have detected tunnel offload
- * GROUP rule. Remember its properties for later use.
+ * SWITCH rule. Remember its properties for later use.
*/
spec->ft_rule_type = ctx_mae.ft_rule_type;
- spec->ft = ctx_mae.ft;
+ spec->ft_ctx = ctx_mae.ft_ctx;
rc = sfc_flow_parse_pattern(sa, sfc_flow_items, RTE_DIM(sfc_flow_items),
ctx_mae.pattern, &ctx, error);
{
int rc;
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
/* Workaround. See sfc_flow_parse_rte_to_mae() */
} else if (conf->id > SFC_FT_USER_MARK_MASK) {
sfc_err(sa, "the mark value is too large");
SFC_BUILD_SET_OVERFLOW(RTE_FLOW_ACTION_TYPE_MARK,
bundle->actions_mask);
if ((rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
- spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
rc = sfc_mae_rule_parse_action_mark(sa, action->conf,
spec_mae, spec);
} else {
rc = efx_mae_action_set_populate_drop(spec);
break;
case RTE_FLOW_ACTION_TYPE_JUMP:
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
/* Workaround. See sfc_flow_parse_rte_to_mae() */
break;
}
++(ctx.n_counters);
}
- if (spec_mae->ft_rule_type == SFC_FT_RULE_GROUP) {
- /* JUMP rules don't decapsulate packets. GROUP rules do. */
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_SWITCH) {
+ /* TUNNEL rules don't decapsulate packets. SWITCH rules do. */
rc = efx_mae_action_set_populate_decap(ctx.spec);
if (rc != 0)
goto fail_enforce_ft_decap;
switch (spec_mae->ft_rule_type) {
case SFC_FT_RULE_NONE:
break;
- case SFC_FT_RULE_JUMP:
+ case SFC_FT_RULE_TUNNEL:
/* Workaround. See sfc_flow_parse_rte_to_mae() */
rc = sfc_mae_rule_parse_action_pf_vf(sa, NULL, ctx.spec);
if (rc != 0)
- goto fail_workaround_jump_delivery;
+ goto fail_workaround_tunnel_delivery;
- ctx.counter_ft = spec_mae->ft;
+ ctx.counter_ft_ctx = spec_mae->ft_ctx;
break;
- case SFC_FT_RULE_GROUP:
+ case SFC_FT_RULE_SWITCH:
/*
* Packets that go to the rule's AR have FT mark set (from the
- * JUMP rule OR's RECIRC_ID). Remove this mark in matching
+ * TUNNEL rule OR's RECIRC_ID). Remove this mark in matching
* packets. The user may have provided their own action
* MARK above, so don't check the return value here.
*/
(void)efx_mae_action_set_populate_mark(ctx.spec, 0);
- ctx.ft_group_hit_counter = &spec_mae->ft->group_hit_counter;
+ ctx.ft_switch_hit_counter =
+ &spec_mae->ft_ctx->switch_hit_counter;
break;
default:
SFC_ASSERT(B_FALSE);
return 0;
fail_action_set_add:
-fail_workaround_jump_delivery:
+fail_workaround_tunnel_delivery:
fail_nb_count:
sfc_mae_encap_header_del(sa, ctx.encap_header);
goto fail_outer_rule_enable;
}
- if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
- spec_mae->ft->reset_jump_hit_counter =
- spec_mae->ft->group_hit_counter;
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+ spec_mae->ft_ctx->reset_tunnel_hit_counter =
+ spec_mae->ft_ctx->switch_hit_counter;
}
if (action_set == NULL) {
/* RTE counter ID validity status */
bool rte_id_valid;
- /* Flow Tunnel (FT) GROUP hit counter (or NULL) */
- uint64_t *ft_group_hit_counter;
- /* Flow Tunnel (FT) context (for JUMP rules; otherwise, NULL) */
- struct sfc_flow_tunnel *ft;
+ /* Flow Tunnel (FT) SWITCH hit counter (or NULL) */
+ uint64_t *ft_switch_hit_counter;
+ /* Flow Tunnel (FT) context (for TUNNEL rules; otherwise, NULL) */
+ struct sfc_ft_ctx *ft_ctx;
};
/** Action set registry entry */
union sfc_pkts_bytes value;
union sfc_pkts_bytes reset;
- uint64_t *ft_group_hit_counter;
+ uint64_t *ft_switch_hit_counter;
};
struct sfc_mae_counters_xstats {
size_t tunnel_def_mask_size;
const void *tunnel_def_mask;
bool match_mport_set;
- enum sfc_flow_tunnel_rule_type ft_rule_type;
+ enum sfc_ft_rule_type ft_rule_type;
struct sfc_mae_pattern_data pattern_data;
efx_tunnel_protocol_t encap_type;
const struct rte_flow_item *pattern;
unsigned int priority;
- struct sfc_flow_tunnel *ft;
+ struct sfc_ft_ctx *ft_ctx;
};
int sfc_mae_attach(struct sfc_adapter *sa);
&p->value.pkts_bytes.int128, __ATOMIC_RELAXED);
p->generation_count = generation_count;
- p->ft_group_hit_counter = counterp->ft_group_hit_counter;
+ p->ft_switch_hit_counter = counterp->ft_switch_hit_counter;
/*
* The flag is set at the very end of add operation and reset
__atomic_store(&p->value.pkts_bytes,
&cnt_val.pkts_bytes, __ATOMIC_RELAXED);
- if (p->ft_group_hit_counter != NULL) {
- uint64_t ft_group_hit_counter;
+ if (p->ft_switch_hit_counter != NULL) {
+ uint64_t ft_switch_hit_counter;
- ft_group_hit_counter = *p->ft_group_hit_counter + pkts;
- __atomic_store_n(p->ft_group_hit_counter, ft_group_hit_counter,
+ ft_switch_hit_counter = *p->ft_switch_hit_counter + pkts;
+ __atomic_store_n(p->ft_switch_hit_counter, ft_switch_hit_counter,
__ATOMIC_RELAXED);
}
const struct sfc_mae_counter_id *counter,
struct rte_flow_query_count *data)
{
- struct sfc_flow_tunnel *ft = counter->ft;
- uint64_t non_reset_jump_hit_counter;
+ struct sfc_ft_ctx *ft_ctx = counter->ft_ctx;
+ uint64_t non_reset_tunnel_hit_counter;
struct sfc_mae_counter *p;
union sfc_pkts_bytes value;
data->hits_set = 1;
data->hits = value.pkts - p->reset.pkts;
- if (ft != NULL) {
- data->hits += ft->group_hit_counter;
- non_reset_jump_hit_counter = data->hits;
- data->hits -= ft->reset_jump_hit_counter;
+ if (ft_ctx != NULL) {
+ data->hits += ft_ctx->switch_hit_counter;
+ non_reset_tunnel_hit_counter = data->hits;
+ data->hits -= ft_ctx->reset_tunnel_hit_counter;
} else {
data->bytes_set = 1;
data->bytes = value.bytes - p->reset.bytes;
}
if (data->reset != 0) {
- if (ft != NULL) {
- ft->reset_jump_hit_counter = non_reset_jump_hit_counter;
+ if (ft_ctx != NULL) {
+ ft_ctx->reset_tunnel_hit_counter =
+ non_reset_tunnel_hit_counter;
} else {
p->reset.pkts = value.pkts;
p->reset.bytes = value.bytes;
rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;
if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
- sfc_flow_tunnel_is_active(sa))
+ sfc_ft_is_active(sa))
rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;
rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
info.batch_max = encp->enc_rx_batch_max;
info.prefix_size = encp->enc_rx_prefix_size;
- if (sfc_flow_tunnel_is_active(sa))
+ if (sfc_ft_is_active(sa))
info.user_mark_mask = SFC_FT_USER_MARK_MASK;
else
info.user_mark_mask = UINT32_MAX;