#include "sfc_rx.h"
#include "sfc_filter.h"
#include "sfc_flow.h"
+#include "sfc_flow_rss.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_dp_rx.h"
#include "sfc_mae_counter.h"
+#include "sfc_switch.h"
struct sfc_flow_ops_by_spec {
sfc_flow_parse_cb_t *parse;
sfc_flow_verify_cb_t *verify;
sfc_flow_cleanup_cb_t *cleanup;
sfc_flow_insert_cb_t *insert;
sfc_flow_remove_cb_t *remove;
sfc_flow_query_cb_t *query;
};
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_mae;
static sfc_flow_insert_cb_t sfc_flow_filter_insert;
static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+static sfc_flow_cleanup_cb_t sfc_flow_cleanup;
static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
.parse = sfc_flow_parse_rte_to_filter,
.verify = NULL,
- .cleanup = NULL,
+ .cleanup = sfc_flow_cleanup,
.insert = sfc_flow_filter_insert,
.remove = sfc_flow_filter_remove,
.query = NULL,
const struct rte_flow_item_vlan *spec = NULL;
const struct rte_flow_item_vlan *mask = NULL;
const struct rte_flow_item_vlan supp_mask = {
- .tci = rte_cpu_to_be_16(ETH_VLAN_ID_MAX),
+ .tci = rte_cpu_to_be_16(RTE_ETH_VLAN_ID_MAX),
.inner_type = RTE_BE16(0xffff),
};
"Groups are not supported");
return -rte_errno;
}
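+ /*
+ * Direction attributes (ingress/egress) are meaningful for
+ * non-transfer rules only, so check them only in that case.
+ */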
- if (attr->egress != 0) {
+ if (attr->egress != 0 && attr->transfer == 0) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
"Egress is not supported");
return -rte_errno;
}
- if (attr->ingress == 0) {
+ if (attr->ingress == 0 && attr->transfer == 0) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
"Ingress is compulsory");
spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
spec_filter->template.efs_priority = EFX_FILTER_PRI_MANUAL;
} else {
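+ /* Only a port with MAE admin privilege can insert transfer rules. */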
- if (mae->status != SFC_MAE_STATUS_SUPPORTED) {
+ if (mae->status != SFC_MAE_STATUS_ADMIN) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
attr, "Transfer is not supported");
spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
rxq_info = &sfc_sa2shared(sa)->rxq_info[queue->index];
- spec_filter->rss_hash_required = !!(rxq_info->rxq_flags &
- SFC_RXQ_FLAG_RSS_HASH);
+
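+ /*
+ * The Rx queue is configured to deliver the RSS hash in Rx
+ * descriptors; enable RSS in the filter and attach the shared
+ * dummy RSS context so that a valid hash is always computed.
+ */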
+ if ((rxq_info->rxq_flags & SFC_RXQ_FLAG_RSS_HASH) != 0) {
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rss *ethdev_rss = &sas->rss;
+
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ spec_filter->rss_ctx = &ethdev_rss->dummy_ctx;
+ }
return 0;
}
const struct rte_flow_action_rss *action_rss,
struct rte_flow *flow)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
- sfc_ethdev_qid_t ethdev_qid;
+ struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
+ struct sfc_flow_rss_conf conf;
+ uint16_t sw_qid_min;
struct sfc_rxq *rxq;
- unsigned int rxq_hw_index_min;
- unsigned int rxq_hw_index_max;
- efx_rx_hash_type_t efx_hash_types;
- const uint8_t *rss_key;
- struct sfc_flow_spec *spec = &flow->spec;
- struct sfc_flow_spec_filter *spec_filter = &spec->filter;
- struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
- unsigned int i;
-
- if (action_rss->queue_num == 0)
- return -EINVAL;
-
- ethdev_qid = sfc_sa2shared(sa)->ethdev_rxq_count - 1;
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
- rxq_hw_index_min = rxq->hw_index;
- rxq_hw_index_max = 0;
-
- for (i = 0; i < action_rss->queue_num; ++i) {
- ethdev_qid = action_rss->queue[i];
-
- if ((unsigned int)ethdev_qid >=
- sfc_sa2shared(sa)->ethdev_rxq_count)
- return -EINVAL;
-
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
-
- if (rxq->hw_index < rxq_hw_index_min)
- rxq_hw_index_min = rxq->hw_index;
+ int rc;
- if (rxq->hw_index > rxq_hw_index_max)
- rxq_hw_index_max = rxq->hw_index;
- }
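+ /*
+ * Mark the filter template as RSS-enabled; the HW context
+ * handle is filled in at filter insertion time.
+ */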
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX_RSS;
- switch (action_rss->func) {
- case RTE_ETH_HASH_FUNCTION_DEFAULT:
- case RTE_ETH_HASH_FUNCTION_TOEPLITZ:
- break;
- default:
- return -EINVAL;
- }
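+ /*
+ * Have the RSS helper validate the action and derive the RSS
+ * configuration from it; sw_qid_min presumably receives the
+ * lowest ethdev Rx queue ID referenced by the action.
+ */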
+ rc = sfc_flow_rss_parse_conf(sa, action_rss, &conf, &sw_qid_min);
+ if (rc != 0)
+ return -rc;
- if (action_rss->level)
- return -EINVAL;
+ rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, sw_qid_min);
+ spec_filter->template.efs_dmaq_id = rxq->hw_index;
- /*
- * Dummy RSS action with only one queue and no specific settings
- * for hash types and key does not require dedicated RSS context
- * and may be simplified to single queue action.
- */
- if (action_rss->queue_num == 1 && action_rss->types == 0 &&
- action_rss->key_len == 0) {
- spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
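+ /*
+ * Try to reuse an existing RSS context with an identical
+ * configuration; allocate a new one below otherwise.
+ */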
+ spec_filter->rss_ctx = sfc_flow_rss_ctx_reuse(sa, &conf, sw_qid_min,
+ action_rss->queue);
+ if (spec_filter->rss_ctx != NULL)
return 0;
- }
-
- if (action_rss->types) {
- int rc;
- rc = sfc_rx_hf_rte_to_efx(sa, action_rss->types,
- &efx_hash_types);
- if (rc != 0)
- return -rc;
- } else {
- unsigned int i;
-
- efx_hash_types = 0;
- for (i = 0; i < rss->hf_map_nb_entries; ++i)
- efx_hash_types |= rss->hf_map[i].efx;
- }
-
- if (action_rss->key_len) {
- if (action_rss->key_len != sizeof(rss->key))
- return -EINVAL;
-
- rss_key = action_rss->key;
- } else {
- rss_key = rss->key;
- }
-
- spec_filter->rss = B_TRUE;
-
- sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
- sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
- sfc_rss_conf->rss_hash_types = efx_hash_types;
- rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(rss->key));
-
- for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
- unsigned int nb_queues = action_rss->queue_num;
- struct sfc_rxq *rxq;
-
- ethdev_qid = action_rss->queue[i % nb_queues];
- rxq = sfc_rxq_ctrl_by_ethdev_qid(sa, ethdev_qid);
- sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
- }
+ rc = sfc_flow_rss_ctx_add(sa, &conf, sw_qid_min, action_rss->queue,
+ &spec_filter->rss_ctx);
+ if (rc != 0)
+ return -rc;
return 0;
}
sfc_flow_filter_insert(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
- struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
- uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
- boolean_t create_context;
- unsigned int i;
+ struct sfc_flow_rss_ctx *rss_ctx = spec_filter->rss_ctx;
int rc = 0;
- create_context = spec_filter->rss || (spec_filter->rss_hash_required &&
- rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT);
-
- if (create_context) {
- unsigned int rss_spread;
- unsigned int rss_hash_types;
- uint8_t *rss_key;
-
- if (spec_filter->rss) {
- rss_spread = MIN(flow_rss->rxq_hw_index_max -
- flow_rss->rxq_hw_index_min + 1,
- EFX_MAXRSS);
- rss_hash_types = flow_rss->rss_hash_types;
- rss_key = flow_rss->rss_key;
- } else {
- /*
- * Initialize dummy RSS context parameters to have
- * valid RSS hash. Use default RSS hash function and
- * key.
- */
- rss_spread = 1;
- rss_hash_types = rss->hash_types;
- rss_key = rss->key;
- }
-
- rc = efx_rx_scale_context_alloc(sa->nic,
- EFX_RX_SCALE_EXCLUSIVE,
- rss_spread,
- &efs_rss_context);
- if (rc != 0)
- goto fail_scale_context_alloc;
-
- rc = efx_rx_scale_mode_set(sa->nic, efs_rss_context,
- rss->hash_alg,
- rss_hash_types, B_TRUE);
- if (rc != 0)
- goto fail_scale_mode_set;
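+ /*
+ * Program the RSS context (if any) to the HW; for flows
+ * without an RSS context this is expected to be a no-op.
+ */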
+ rc = sfc_flow_rss_ctx_program(sa, rss_ctx);
+ if (rc != 0)
+ goto fail_rss_ctx_program;
- rc = efx_rx_scale_key_set(sa->nic, efs_rss_context,
- rss_key, sizeof(rss->key));
- if (rc != 0)
- goto fail_scale_key_set;
- } else {
- efs_rss_context = rss->dummy_rss_context;
- }
+ if (rss_ctx != NULL) {
+ unsigned int i;
- if (spec_filter->rss || spec_filter->rss_hash_required) {
/*
* At this point, fully elaborated filter specifications
* have been produced from the template. To make sure that
* RSS behaviour is consistent between them, set the same
* RSS context value everywhere.
*/
for (i = 0; i < spec_filter->count; i++) {
efx_filter_spec_t *spec = &spec_filter->filters[i];
- spec->efs_rss_context = efs_rss_context;
- spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
- if (spec_filter->rss)
- spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
+ spec->efs_rss_context = rss_ctx->nic_handle;
}
}
if (rc != 0)
goto fail_filter_insert;
- if (create_context) {
- unsigned int dummy_tbl[RTE_DIM(flow_rss->rss_tbl)] = {0};
- unsigned int *tbl;
-
- tbl = spec_filter->rss ? flow_rss->rss_tbl : dummy_tbl;
-
- /*
- * Scale table is set after filter insertion because
- * the table entries are relative to the base RxQ ID
- * and the latter is submitted to the HW by means of
- * inserting a filter, so by the time of the request
- * the HW knows all the information needed to verify
- * the table entries, and the operation will succeed
- */
- rc = efx_rx_scale_tbl_set(sa->nic, efs_rss_context,
- tbl, RTE_DIM(flow_rss->rss_tbl));
- if (rc != 0)
- goto fail_scale_tbl_set;
-
- /* Remember created dummy RSS context */
- if (!spec_filter->rss)
- rss->dummy_rss_context = efs_rss_context;
- }
-
return 0;
-fail_scale_tbl_set:
- sfc_flow_spec_remove(sa, &flow->spec);
-
fail_filter_insert:
-fail_scale_key_set:
-fail_scale_mode_set:
- if (create_context)
- efx_rx_scale_context_free(sa->nic, efs_rss_context);
+ sfc_flow_rss_ctx_terminate(sa, rss_ctx);
-fail_scale_context_alloc:
+fail_rss_ctx_program:
return rc;
}
if (rc != 0)
return rc;
- if (spec_filter->rss) {
- /*
- * All specifications for a given flow rule have the same RSS
- * context, so that RSS context value is taken from the first
- * filter specification
- */
- efx_filter_spec_t *spec = &spec_filter->filters[0];
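+ /* Drop this flow's reference on its RSS context, if any. */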
+ sfc_flow_rss_ctx_terminate(sa, spec_filter->rss_ctx);
- rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
- }
-
- return rc;
+ return 0;
}
static int
uint32_t mark_max;
mark_max = encp->enc_filter_action_mark_max;
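+ /*
+ * With tunnel offload (FT) active, the upper part of the mark
+ * space is reserved for FT, so clamp the user-visible maximum.
+ */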
- if (sfc_flow_tunnel_is_active(sa))
+ if (sfc_ft_is_active(sa))
mark_max = RTE_MIN(mark_max, SFC_FT_USER_MARK_MASK);
if (mark == NULL || mark->id > mark_max)
struct sfc_flow_spec_mae *spec_mae = &spec->mae;
int rc;
+ /*
+ * If the flow is meant to be a TUNNEL rule in a FT context,
+ * preparse its actions and save its properties in spec_mae.
+ */
+ rc = sfc_ft_tunnel_rule_detect(sa, actions, spec_mae, error);
+ if (rc != 0)
+ goto fail;
+
rc = sfc_mae_rule_parse_pattern(sa, pattern, spec_mae, error);
if (rc != 0)
- return rc;
+ goto fail;
+
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL) {
+ /*
+ * By design, this flow should be represented solely by the
+ * outer rule. But the HW/FW hasn't got support for setting
+ * Rx mark from RECIRC_ID on outer rule lookup yet. Neither
+ * does it support outer rule counters. As a workaround, an
+ * action rule of lower priority is used to do the job.
+ *
+ * So don't skip sfc_mae_rule_parse_actions() below.
+ */
+ }
rc = sfc_mae_rule_parse_actions(sa, actions, spec_mae, error);
if (rc != 0)
- return rc;
+ goto fail;
+
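+ /* Hold a reference on the FT context for the lifetime of this flow. */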
+ if (spec_mae->ft_ctx != NULL) {
+ if (spec_mae->ft_rule_type == SFC_FT_RULE_TUNNEL)
+ spec_mae->ft_ctx->tunnel_rule_is_set = B_TRUE;
+
+ ++(spec_mae->ft_ctx->refcnt);
+ }
return 0;
+
+fail:
+ /* Reset these values to avoid confusing sfc_mae_flow_cleanup(). */
+ spec_mae->ft_rule_type = SFC_FT_RULE_NONE;
+ spec_mae->ft_ctx = NULL;
+
+ return rc;
}
static int
return ret;
}
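+/*
+ * Transfer flows are managed through the privileged (admin) port
+ * of the switch domain; report that port as the transfer proxy.
+ */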
+static int
+sfc_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
+ uint16_t *transfer_proxy_port,
+ struct rte_flow_error *error)
+{
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ int ret;
+
+ ret = sfc_mae_get_switch_domain_admin(sa->mae.switch_domain_id,
+ transfer_proxy_port);
+ if (ret != 0) {
+ return rte_flow_error_set(error, ret,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, NULL);
+ }
+
+ return 0;
+}
+
const struct rte_flow_ops sfc_flow_ops = {
.validate = sfc_flow_validate,
.create = sfc_flow_create,
.flush = sfc_flow_flush,
.query = sfc_flow_query,
.isolate = sfc_flow_isolate,
+ .tunnel_decap_set = sfc_ft_decap_set,
+ .tunnel_match = sfc_ft_match,
+ .tunnel_action_decap_release = sfc_ft_action_decap_release,
+ .tunnel_item_release = sfc_ft_item_release,
+ .get_restore_info = sfc_ft_get_restore_info,
+ .pick_transfer_proxy = sfc_flow_pick_transfer_proxy,
};
void
sfc_flow_stop(struct sfc_adapter *sa)
{
- struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
- struct sfc_rss *rss = &sas->rss;
struct rte_flow *flow;
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->flow_list, entries)
sfc_flow_remove(sa, flow, NULL);
- if (rss->dummy_rss_context != EFX_RSS_CONTEXT_DEFAULT) {
- efx_rx_scale_context_free(sa->nic, rss->dummy_rss_context);
- rss->dummy_rss_context = EFX_RSS_CONTEXT_DEFAULT;
- }
-
/*
* MAE counter service is not stopped on flow rule remove to avoid
* extra work. Make sure that it is stopped here.
SFC_ASSERT(sfc_adapter_is_locked(sa));
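+ /* Reset tunnel offload (FT) counters before re-inserting the flows. */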
+ sfc_ft_counters_reset(sa);
+
TAILQ_FOREACH(flow, &sa->flow_list, entries) {
rc = sfc_flow_insert(sa, flow, NULL);
if (rc != 0)
fail_bad_flow:
return rc;
}
+
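+/*
+ * The .cleanup method of sfc_flow_ops_filter: release per-flow
+ * resources acquired at parse time, namely the flow's RSS context.
+ */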
+static void
+sfc_flow_cleanup(struct sfc_adapter *sa, struct rte_flow *flow)
+{
+ if (flow == NULL)
+ return;
+
+ sfc_flow_rss_ctx_del(sa, flow->spec.filter.rss_ctx);
+}