#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
+#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
+#include "sfc_switch.h"
struct sfc_flow_ops_by_spec {
sfc_flow_parse_cb_t *parse;
const struct rte_flow_item_vlan *spec = NULL;
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
- .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
.inner_type = RTE_BE16(0xffff),
};
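+ /*
+ * Illustration (a minimal sketch, not the actual helper): a
+ * user-supplied VLAN mask is usable only if it is a subset of
+ * supp_mask above. Assuming a plain byte-wise subset check, it
+ * could look like:
+ *
+ *     const uint8_t *user = (const uint8_t *)mask;
+ *     const uint8_t *supp = (const uint8_t *)&supp_mask;
+ *     size_t i;
+ *
+ *     for (i = 0; i < sizeof(supp_mask); ++i) {
+ *         if ((user[i] | supp[i]) != supp[i])
+ *             return EINVAL; // bit the filter cannot match on
+ *     }
+ */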
spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
} else {
- if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
+ if (mae->status != SFC_MAE_STATUS_ADMIN) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "Transfer is not supported");
struct sfc_flow_spec *spec = &flow->spec;
struct sfc_flow_spec_filter *spec_filter = &spec->filter;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t mark_max;
- if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
+ mark_max = encp->enc_filter_action_mark_max;
+ if (sfc_flow_tunnel_is_active(sa))
+ mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
+
+ if (mark == NULL || mark->id > mark_max)
return EINVAL;
spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
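+ /*
+ * Example: when tunnel offload is active, part of the 32-bit mark
+ * space is reserved for the driver's own tunnel bookkeeping, so user
+ * MARK IDs are additionally clamped to SFC_FT_USER_MARK_MASK above.
+ * Hypothetically, if that mask were 0x00ffffff, id = 0x01000000 would
+ * be rejected with EINVAL, while id = 0x00ffffff would still pass
+ * (subject to the HW limit in enc_filter_action_mark_max).
+ */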
struct sfc_flow_spec_mae *spec_mae = &spec->mae;
int rc;
+ /*
+ * If the flow is meant to be a JUMP rule in tunnel offload,
+ * preparse its actions and save its properties in spec_mae.
+ */
+ rc = sfc_flow_tunnel_detect_jump_rule(sa, actions, spec_mae, error);
+ if (rc != 0)
+ goto fail;
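+
+ /*
+ * Illustration (hypothetical sketch): a detector like the one above
+ * might recognise a would-be JUMP rule by walking the action list:
+ *
+ *     for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
+ *         if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
+ *             spec_mae->ft_rule_type = SFC_FT_RULE_JUMP;
+ *     }
+ *
+ * The actual criteria live in sfc_flow_tunnel_detect_jump_rule().
+ */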
+
rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
if (rc != 0)
- return rc;
+ goto fail;
+
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP) {
+ /*
+ * By design, this flow should be represented solely by the
+ * outer rule. However, the HW/FW does not yet support setting
+ * the Rx mark from RECIRC_ID on outer rule lookup, nor does it
+ * support outer rule counters. As a workaround, an action rule
+ * of lower priority is used to do the job.
+ *
+ * Hence, do not skip sfc_mae_rule_parse_actions() below.
+ */
+ }
rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
if (rc != 0)
- return rc;
+ goto fail;
+
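+ /*
+ * Tie the flow to its tunnel offload context (if any): record that
+ * the JUMP rule is in place and take a reference on the context.
+ */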
+ if (spec_mae->ft != NULL) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_JUMP)
+ spec_mae->ft->jump_rule_is_set = B_TRUE;
+
+ ++(spec_mae->ft->refcnt);
+ }
return 0;
+
+fail:
+ /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
+ spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
+ spec_mae->ft = NULL;
+
+ return rc;
}
static int
return ret;
}
+static int
+sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
+ uint16_t *transfer_proxy_port,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ int ret;
+
+ ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
+ transfer_proxy_port);
+ if (ret != 0) {
+ return rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+ }
+
+ return 0;
+}
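+
+/*
+ * Usage sketch (application side; assumes the generic flow API of
+ * DPDK 21.11 or later): pick the transfer proxy port first, then
+ * manage transfer flows through it:
+ *
+ *     uint16_t proxy_port;
+ *     struct rte_flow_error err;
+ *
+ *     if (rte_flow_pick_transfer_proxy(port_id, &proxy_port, &err) != 0)
+ *         return;
+ *     // attr.transfer = 1; create the rule via the proxy port
+ *     flow = rte_flow_create(proxy_port, &attr, pattern, actions, &err);
+ */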
+
const struct rte_flow_ops sfc_flow_ops = {
.validate = sfc_flow_validate,
.create = sfc_flow_create,
.flush = sfc_flow_flush,
.query = sfc_flow_query,
.isolate = sfc_flow_isolate,
+ .tunnel_decap_set = sfc_flow_tunnel_decap_set,
+ .tunnel_match = sfc_flow_tunnel_match,
+ .tunnel_action_decap_release = sfc_flow_tunnel_action_decap_release,
+ .tunnel_item_release = sfc_flow_tunnel_item_release,
+ .get_restore_info = sfc_flow_tunnel_get_restore_info,
+ .pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
};
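+
+/*
+ * Usage sketch (application side): the tunnel_* callbacks above back
+ * the generic rte_flow tunnel offload API (DPDK 20.11+). Assuming a
+ * VXLAN tunnel, a typical sequence is:
+ *
+ *     struct rte_flow_tunnel tnl = { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
+ *     struct rte_flow_action *pmd_actions;
+ *     struct rte_flow_item *pmd_items;
+ *     struct rte_flow_restore_info info;
+ *     uint32_t n_actions, n_items;
+ *     struct rte_flow_error err;
+ *
+ *     // PMD actions to prepend to the group 0 (JUMP) rule
+ *     rte_flow_tunnel_decap_set(port_id, &tnl, &pmd_actions,
+ *                               &n_actions, &err);
+ *     // PMD items to prepend to the tunnel match (GROUP) rule
+ *     rte_flow_tunnel_match(port_id, &tnl, &pmd_items, &n_items, &err);
+ *     // ...create the two flows with rte_flow_create()...
+ *     // datapath: recover tunnel info for packets missed in HW
+ *     rte_flow_get_restore_info(port_id, mbuf, &info, &err);
+ *     // hand the PMD-owned arrays back when done
+ *     rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
+ *                                          n_actions, &err);
+ *     rte_flow_tunnel_item_release(port_id, pmd_items, n_items, &err);
+ */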
void
SFC_ASSERT(sfc_adapter_is_locked(sa));
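+ /*
+ * Reset tunnel offload hit counters first so that the flows
+ * re-inserted below start counting afresh.
+ */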
+ sfc_flow_tunnel_reset_hit_counters(sa);
+
TAILQ_FOREACH(flow, &sa->flow_list, entries) {
rc = sfc_flow_insert(sa, flow, NULL);
if (rc != 0)