#include <rte_tailq.h>
#include <rte_common.h>
#include <rte_ethdev_driver.h>
-#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "sfc_log.h"
#include "sfc_dp_rx.h"
+struct sfc_flow_ops_by_spec {
+ sfc_flow_parse_cb_t *parse;
+ sfc_flow_insert_cb_t *insert;
+ sfc_flow_remove_cb_t *remove;
+};
+
+static sfc_flow_parse_cb_t sfc_flow_parse_rte_to_filter;
+static sfc_flow_insert_cb_t sfc_flow_filter_insert;
+static sfc_flow_remove_cb_t sfc_flow_filter_remove;
+
+static const struct sfc_flow_ops_by_spec sfc_flow_ops_filter = {
+ .parse = sfc_flow_parse_rte_to_filter,
+ .insert = sfc_flow_filter_insert,
+ .remove = sfc_flow_filter_remove,
+};
+
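+/* Look up the backend operations that match the flow specification type */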
+static const struct sfc_flow_ops_by_spec *
+sfc_flow_get_ops_by_spec(struct rte_flow *flow)
+{
+ struct sfc_flow_spec *spec = &flow->spec;
+ const struct sfc_flow_ops_by_spec *ops = NULL;
+
+ switch (spec->type) {
+ case SFC_FLOW_SPEC_FILTER:
+ ops = &sfc_flow_ops_filter;
+ break;
+ default:
+ SFC_ASSERT(false);
+ break;
+ }
+
+ return ops;
+}
+
/*
- * At now flow API is implemented in such a manner that each
- * flow rule is converted to one or more hardware filters.
+ * Currently, filter-based (VNIC) flow API is implemented in such a manner
+ * that each flow rule is converted to one or more hardware filters.
* All elements of flow rule (attributes, pattern items, actions)
* correspond to one or more fields in the efx_filter_spec_s structure
* that is responsible for the hardware filter.
static sfc_flow_spec_set_vals sfc_flow_set_ethertypes;
static sfc_flow_spec_set_vals sfc_flow_set_ifrm_unknown_dst_flags;
static sfc_flow_spec_check sfc_flow_check_ifrm_unknown_dst_flags;
+static sfc_flow_spec_set_vals sfc_flow_set_outer_vid_flag;
+static sfc_flow_spec_check sfc_flow_check_outer_vid_flag;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
if (spec == NULL)
return 0;
- if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
+ if (rte_is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_LOC_MAC :
EFX_FILTER_MATCH_LOC_MAC;
EFX_MAC_ADDR_LEN);
} else if (memcmp(mask->dst.addr_bytes, ig_mask,
EFX_MAC_ADDR_LEN) == 0) {
- if (is_unicast_ether_addr(&spec->dst))
+ if (rte_is_unicast_ether_addr(&spec->dst))
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
efx_spec->efs_match_flags |= is_ifrm ?
EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
- } else if (!is_zero_ether_addr(&mask->dst)) {
+ } else if (!rte_is_zero_ether_addr(&mask->dst)) {
goto fail_bad_mask;
}
* ethertype masks are equal to zero in inner frame,
* so these fields are filled in only for the outer frame
*/
- if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
+ if (rte_is_same_ether_addr(&mask->src, &supp_mask.src)) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
EFX_MAC_ADDR_LEN);
- } else if (!is_zero_ether_addr(&mask->src)) {
+ } else if (!rte_is_zero_ether_addr(&mask->src)) {
goto fail_bad_mask;
}
* the outer tag and the next matches the inner tag.
*/
if (mask->tci == supp_mask.tci) {
- vid = rte_bswap16(spec->tci);
+ /* Apply mask to keep VID only */
+ vid = rte_bswap16(spec->tci & mask->tci);
if (!(efx_spec->efs_match_flags &
EFX_FILTER_MATCH_OUTER_VID)) {
return 0;
if (mask->protocol == supp_mask.protocol) {
- if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ if (spec->protocol != rte_cpu_to_be_16(RTE_ETHER_TYPE_TEB)) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"GENEVE encap. protocol must be Ethernet "
struct rte_flow *flow,
struct rte_flow_error *error)
{
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+
if (attr == NULL) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR, NULL,
"Groups are not supported");
return -rte_errno;
}
- if (attr->priority != 0) {
- rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY, attr,
- "Priorities are not supported");
- return -rte_errno;
- }
if (attr->egress != 0) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attr,
"Egress is not supported");
return -rte_errno;
}
- if (attr->transfer != 0) {
+ if (attr->ingress == 0) {
rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
- "Transfer is not supported");
+ RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
+ "Ingress is compulsory");
return -rte_errno;
}
- if (attr->ingress == 0) {
+ if (attr->transfer == 0) {
+ if (attr->priority != 0) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
+ attr, "Priorities are unsupported");
+ return -rte_errno;
+ }
+ spec->type = SFC_FLOW_SPEC_FILTER;
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_RX;
+ spec_filter->template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
+ } else {
rte_flow_error_set(error, ENOTSUP,
- RTE_FLOW_ERROR_TYPE_ATTR_INGRESS, attr,
- "Only ingress is supported");
+ RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER, attr,
+ "Transfer is not supported");
return -rte_errno;
}
- flow->spec.template.efs_flags |= EFX_FILTER_FLAG_RX;
- flow->spec.template.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
-
return 0;
}
unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
boolean_t is_ifrm = B_FALSE;
const struct sfc_flow_item *item;
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
if (pattern == NULL) {
rte_flow_error_set(error, EINVAL,
break;
}
- rc = item->parse(pattern, &flow->spec.template, error);
+ rc = item->parse(pattern, &spec_filter->template, error);
if (rc != 0)
return rc;
const struct rte_flow_action_queue *queue,
struct rte_flow *flow)
{
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
struct sfc_rxq *rxq;
- if (queue->index >= sa->rxq_count)
+ if (queue->index >= sfc_sa2shared(sa)->rxq_count)
return -EINVAL;
- rxq = sa->rxq_info[queue->index].rxq;
- flow->spec.template.efs_dmaq_id = (uint16_t)rxq->hw_index;
+ rxq = &sa->rxq_ctrl[queue->index];
+ spec_filter->template.efs_dmaq_id = (uint16_t)rxq->hw_index;
return 0;
}
const struct rte_flow_action_rss *action_rss,
struct rte_flow *flow)
{
- struct sfc_rss *rss = &sa->rss;
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rss *rss = &sas->rss;
unsigned int rxq_sw_index;
struct sfc_rxq *rxq;
unsigned int rxq_hw_index_min;
unsigned int rxq_hw_index_max;
efx_rx_hash_type_t efx_hash_types;
const uint8_t *rss_key;
- struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+ struct sfc_flow_rss *sfc_rss_conf = &spec_filter->rss_conf;
unsigned int i;
if (action_rss->queue_num == 0)
return -EINVAL;
- rxq_sw_index = sa->rxq_count - 1;
- rxq = sa->rxq_info[rxq_sw_index].rxq;
+ rxq_sw_index = sfc_sa2shared(sa)->rxq_count - 1;
+ rxq = &sa->rxq_ctrl[rxq_sw_index];
rxq_hw_index_min = rxq->hw_index;
rxq_hw_index_max = 0;
for (i = 0; i < action_rss->queue_num; ++i) {
rxq_sw_index = action_rss->queue[i];
- if (rxq_sw_index >= sa->rxq_count)
+ if (rxq_sw_index >= sfc_sa2shared(sa)->rxq_count)
return -EINVAL;
- rxq = sa->rxq_info[rxq_sw_index].rxq;
+ rxq = &sa->rxq_ctrl[rxq_sw_index];
if (rxq->hw_index < rxq_hw_index_min)
rxq_hw_index_min = rxq->hw_index;
*/
if (action_rss->queue_num == 1 && action_rss->types == 0 &&
action_rss->key_len == 0) {
- flow->spec.template.efs_dmaq_id = rxq_hw_index_min;
+ spec_filter->template.efs_dmaq_id = rxq_hw_index_min;
return 0;
}
rss_key = rss->key;
}
- flow->rss = B_TRUE;
+ spec_filter->rss = B_TRUE;
sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
unsigned int nb_queues = action_rss->queue_num;
unsigned int rxq_sw_index = action_rss->queue[i % nb_queues];
- struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
+ struct sfc_rxq *rxq = &sa->rxq_ctrl[rxq_sw_index];
sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
}
sfc_flow_spec_flush(struct sfc_adapter *sa, struct sfc_flow_spec *spec,
unsigned int filters_count)
{
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
unsigned int i;
int ret = 0;
for (i = 0; i < filters_count; i++) {
int rc;
- rc = efx_filter_remove(sa->nic, &spec->filters[i]);
+ rc = efx_filter_remove(sa->nic, &spec_filter->filters[i]);
if (ret == 0 && rc != 0) {
sfc_err(sa, "failed to remove filter specification "
"(rc = %d)", rc);
static int
sfc_flow_spec_insert(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
unsigned int i;
int rc = 0;
- for (i = 0; i < spec->count; i++) {
- rc = efx_filter_insert(sa->nic, &spec->filters[i]);
+ for (i = 0; i < spec_filter->count; i++) {
+ rc = efx_filter_insert(sa->nic, &spec_filter->filters[i]);
if (rc != 0) {
sfc_flow_spec_flush(sa, spec, i);
break;
static int
sfc_flow_spec_remove(struct sfc_adapter *sa, struct sfc_flow_spec *spec)
{
- return sfc_flow_spec_flush(sa, spec, spec->count);
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+
+ return sfc_flow_spec_flush(sa, spec, spec_filter->count);
}
static int
sfc_flow_filter_insert(struct sfc_adapter *sa,
struct rte_flow *flow)
{
- struct sfc_rss *rss = &sa->rss;
- struct sfc_flow_rss *flow_rss = &flow->rss_conf;
+ struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
+ struct sfc_rss *rss = &sas->rss;
+ struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
+ struct sfc_flow_rss *flow_rss = &spec_filter->rss_conf;
uint32_t efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
unsigned int i;
int rc = 0;
- if (flow->rss) {
+ if (spec_filter->rss) {
unsigned int rss_spread = MIN(flow_rss->rxq_hw_index_max -
flow_rss->rxq_hw_index_min + 1,
EFX_MAXRSS);
* RSS behaviour is consistent between them, set the same
* RSS context value everywhere.
*/
- for (i = 0; i < flow->spec.count; i++) {
- efx_filter_spec_t *spec = &flow->spec.filters[i];
+ for (i = 0; i < spec_filter->count; i++) {
+ efx_filter_spec_t *spec = &spec_filter->filters[i];
spec->efs_rss_context = efs_rss_context;
spec->efs_dmaq_id = flow_rss->rxq_hw_index_min;
if (rc != 0)
goto fail_filter_insert;
- if (flow->rss) {
+ if (spec_filter->rss) {
/*
* Scale table is set after filter insertion because
* the table entries are relative to the base RxQ ID
sfc_flow_filter_remove(struct sfc_adapter *sa,
struct rte_flow *flow)
{
+ struct sfc_flow_spec_filter *spec_filter = &flow->spec.filter;
int rc = 0;
rc = sfc_flow_spec_remove(sa, &flow->spec);
if (rc != 0)
return rc;
- if (flow->rss) {
+ if (spec_filter->rss) {
/*
* All specifications for a given flow rule have the same RSS
* context, so that RSS context value is taken from the first
* filter specification
*/
- efx_filter_spec_t *spec = &flow->spec.filters[0];
+ efx_filter_spec_t *spec = &spec_filter->filters[0];
rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
}
const struct rte_flow_action_mark *mark,
struct rte_flow *flow)
{
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
if (mark == NULL || mark->id > encp->enc_filter_action_mark_max)
return EINVAL;
- flow->spec.template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
- flow->spec.template.efs_mark = mark->id;
+ spec_filter->template.efs_flags |= EFX_FILTER_FLAG_ACTION_MARK;
+ spec_filter->template.efs_mark = mark->id;
return 0;
}
struct rte_flow_error *error)
{
int rc;
- const unsigned int dp_rx_features = sa->dp_rx->features;
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+ const unsigned int dp_rx_features = sa->priv.dp_rx->features;
uint32_t actions_set = 0;
const uint32_t fate_actions_mask = (1UL << RTE_FLOW_ACTION_TYPE_QUEUE) |
(1UL << RTE_FLOW_ACTION_TYPE_RSS) |
rc = sfc_flow_parse_rss(sa, actions->conf, flow);
if (rc != 0) {
- rte_flow_error_set(error, rc,
+ rte_flow_error_set(error, -rc,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
"Bad RSS action");
return -rte_errno;
if ((actions_set & fate_actions_mask) != 0)
goto fail_fate_actions;
- flow->spec.template.efs_dmaq_id =
+ spec_filter->template.efs_dmaq_id =
EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
break;
return -rte_errno;
}
- flow->spec.template.efs_flags |=
+ spec_filter->template.efs_flags |=
EFX_FILTER_FLAG_ACTION_FLAG;
break;
/* When fate is unknown, drop traffic. */
if ((actions_set & fate_actions_mask) == 0) {
- flow->spec.template.efs_dmaq_id =
+ spec_filter->template.efs_dmaq_id =
EFX_FILTER_SPEC_RX_DMAQ_ID_DROP;
}
struct rte_flow_error *error)
{
unsigned int i;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
static const efx_filter_match_flags_t vals[] = {
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST,
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST
};
- if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Number of specifications is incorrect while copying "
return -rte_errno;
}
- for (i = 0; i < spec->count; i++) {
+ for (i = 0; i < spec_filter->count; i++) {
/* The check above ensures that divisor can't be zero here */
- spec->filters[i].efs_match_flags |=
+ spec_filter->filters[i].efs_match_flags |=
vals[i / filters_count_for_one_val];
}
struct rte_flow_error *error)
{
unsigned int i;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
static const uint16_t vals[] = {
EFX_ETHER_TYPE_IPV4, EFX_ETHER_TYPE_IPV6
};
- if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Number of specifications is incorrect "
return -rte_errno;
}
- for (i = 0; i < spec->count; i++) {
- spec->filters[i].efs_match_flags |=
+ for (i = 0; i < spec_filter->count; i++) {
+ spec_filter->filters[i].efs_match_flags |=
EFX_FILTER_MATCH_ETHER_TYPE;
/*
* The check above ensures that
* filters_count_for_one_val is not 0
*/
- spec->filters[i].efs_ether_type =
+ spec_filter->filters[i].efs_ether_type =
vals[i / filters_count_for_one_val];
}
return 0;
}
+/**
+ * Set the EFX_FILTER_MATCH_OUTER_VID match flag with value 0
+ * in the same specifications after copying.
+ *
+ * @param spec[in, out]
+ * SFC flow specification to update.
+ * @param filters_count_for_one_val[in]
+ *   How many specifications should have the same match flag, i.e. the
+ *   number of specifications before copying.
+ * @param error[out]
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_set_outer_vid_flag(struct sfc_flow_spec *spec,
+ unsigned int filters_count_for_one_val,
+ struct rte_flow_error *error)
+{
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+ unsigned int i;
+
+ if (filters_count_for_one_val != spec_filter->count) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Number of specifications is incorrect "
+ "while copying by outer VLAN ID");
+ return -rte_errno;
+ }
+
+ for (i = 0; i < spec_filter->count; i++) {
+ spec_filter->filters[i].efs_match_flags |=
+ EFX_FILTER_MATCH_OUTER_VID;
+
+ spec_filter->filters[i].efs_outer_vid = 0;
+ }
+
+ return 0;
+}
+
/**
* Set the EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST and
* EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST match flags in the same
struct rte_flow_error *error)
{
unsigned int i;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
static const efx_filter_match_flags_t vals[] = {
EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST,
EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST
};
- if (filters_count_for_one_val * RTE_DIM(vals) != spec->count) {
+ if (filters_count_for_one_val * RTE_DIM(vals) != spec_filter->count) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"Number of specifications is incorrect while copying "
return -rte_errno;
}
- for (i = 0; i < spec->count; i++) {
+ for (i = 0; i < spec_filter->count; i++) {
/* The check above ensures that divisor can't be zero here */
- spec->filters[i].efs_match_flags |=
+ spec_filter->filters[i].efs_match_flags |=
vals[i / filters_count_for_one_val];
}
return B_FALSE;
}
+/**
+ * Check whether the list of supported filters has a filter that differs
+ * from @p match only in that it has no EFX_FILTER_MATCH_OUTER_VID flag;
+ * in this case that filter will be used and the
+ * EFX_FILTER_MATCH_OUTER_VID flag is not needed.
+ *
+ * @param match[in]
+ *   The match flags of the filter.
+ * @param spec[in]
+ * Specification to be supplemented.
+ * @param filter[in]
+ * SFC filter with list of supported filters.
+ */
+static boolean_t
+sfc_flow_check_outer_vid_flag(efx_filter_match_flags_t match,
+ __rte_unused efx_filter_spec_t *spec,
+ struct sfc_filter *filter)
+{
+ unsigned int i;
+ efx_filter_match_flags_t match_without_vid =
+ match & ~EFX_FILTER_MATCH_OUTER_VID;
+
+ for (i = 0; i < filter->supported_match_num; i++) {
+ if (match_without_vid == filter->supported_match[i])
+ return B_FALSE;
+ }
+
+ return B_TRUE;
+}
+
/*
* Match flags that can be automatically added to filters.
* Selecting the last minimum when searching for the copy flag ensures that the
.set_vals = sfc_flow_set_ifrm_unknown_dst_flags,
.spec_check = sfc_flow_check_ifrm_unknown_dst_flags,
},
+ {
+ .flag = EFX_FILTER_MATCH_OUTER_VID,
+ .vals_count = 1,
+ .set_vals = sfc_flow_set_outer_vid_flag,
+ .spec_check = sfc_flow_check_outer_vid_flag,
+ },
};
/* Get item from array sfc_flow_copy_flags */
unsigned int new_filters_count;
unsigned int filters_count_for_one_val;
const struct sfc_flow_copy_flag *copy_flag;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
int rc;
copy_flag = sfc_flow_get_copy_flag(flag);
return -rte_errno;
}
- new_filters_count = spec->count * copy_flag->vals_count;
+ new_filters_count = spec_filter->count * copy_flag->vals_count;
if (new_filters_count > SF_FLOW_SPEC_NB_FILTERS_MAX) {
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
}
/* Copy filters specifications */
- for (i = spec->count; i < new_filters_count; i++)
- spec->filters[i] = spec->filters[i - spec->count];
+ for (i = spec_filter->count; i < new_filters_count; i++) {
+ spec_filter->filters[i] =
+ spec_filter->filters[i - spec_filter->count];
+ }
- filters_count_for_one_val = spec->count;
- spec->count = new_filters_count;
+ filters_count_for_one_val = spec_filter->count;
+ spec_filter->count = new_filters_count;
rc = copy_flag->set_vals(spec, filters_count_for_one_val, error);
if (rc != 0)
struct sfc_flow_spec *spec,
struct rte_flow_error *error)
{
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
struct sfc_filter *filter = &sa->filter;
efx_filter_match_flags_t miss_flags;
efx_filter_match_flags_t min_miss_flags = 0;
unsigned int i;
int rc;
- match = spec->template.efs_match_flags;
+ match = spec_filter->template.efs_match_flags;
for (i = 0; i < filter->supported_match_num; i++) {
if ((match & filter->supported_match[i]) == match) {
miss_flags = filter->supported_match[i] & (~match);
multiplier = sfc_flow_check_missing_flags(miss_flags,
- &spec->template, filter);
+ &spec_filter->template, filter);
if (multiplier > 0) {
if (multiplier <= min_multiplier) {
min_multiplier = multiplier;
if (min_multiplier == UINT_MAX) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Flow rule pattern is not supported");
+ "The flow rule pattern is unsupported");
return -rte_errno;
}
* Check whether the spec maps to a hardware filter which is known to be
* ineffective despite being valid.
*
+ * @param filter[in]
+ * SFC filter with list of supported filters.
* @param spec[in]
* SFC flow specification.
*/
static boolean_t
-sfc_flow_is_match_flags_exception(struct sfc_flow_spec *spec)
+sfc_flow_is_match_flags_exception(struct sfc_filter *filter,
+ struct sfc_flow_spec *spec)
{
unsigned int i;
uint16_t ether_type;
uint8_t ip_proto;
efx_filter_match_flags_t match_flags;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
- for (i = 0; i < spec->count; i++) {
- match_flags = spec->filters[i].efs_match_flags;
+ for (i = 0; i < spec_filter->count; i++) {
+ match_flags = spec_filter->filters[i].efs_match_flags;
if (sfc_flow_is_match_with_vids(match_flags,
EFX_FILTER_MATCH_ETHER_TYPE) ||
sfc_flow_is_match_with_vids(match_flags,
EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_LOC_MAC)) {
- ether_type = spec->filters[i].efs_ether_type;
- if (ether_type == EFX_ETHER_TYPE_IPV4 ||
- ether_type == EFX_ETHER_TYPE_IPV6)
+ ether_type = spec_filter->filters[i].efs_ether_type;
+ if (filter->supports_ip_proto_or_addr_filter &&
+ (ether_type == EFX_ETHER_TYPE_IPV4 ||
+ ether_type == EFX_ETHER_TYPE_IPV6))
return B_TRUE;
} else if (sfc_flow_is_match_with_vids(match_flags,
EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_ETHER_TYPE |
EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_MAC)) {
- ip_proto = spec->filters[i].efs_ip_proto;
- if (ip_proto == EFX_IPPROTO_TCP ||
- ip_proto == EFX_IPPROTO_UDP)
+ ip_proto = spec_filter->filters[i].efs_ip_proto;
+ if (filter->supports_rem_or_local_port_filter &&
+ (ip_proto == EFX_IPPROTO_TCP ||
+ ip_proto == EFX_IPPROTO_UDP))
return B_TRUE;
}
}
struct rte_flow *flow,
struct rte_flow_error *error)
{
- efx_filter_spec_t *spec_tmpl = &flow->spec.template;
+ struct sfc_flow_spec *spec = &flow->spec;
+ struct sfc_flow_spec_filter *spec_filter = &spec->filter;
+ efx_filter_spec_t *spec_tmpl = &spec_filter->template;
efx_filter_match_flags_t match_flags = spec_tmpl->efs_match_flags;
int rc;
/* Initialize the first filter spec with template */
- flow->spec.filters[0] = *spec_tmpl;
- flow->spec.count = 1;
+ spec_filter->filters[0] = *spec_tmpl;
+ spec_filter->count = 1;
if (!sfc_filter_is_match_supported(sa, match_flags)) {
rc = sfc_flow_spec_filters_complete(sa, &flow->spec, error);
return rc;
}
- if (sfc_flow_is_match_flags_exception(&flow->spec)) {
+ if (sfc_flow_is_match_flags_exception(&sa->filter, &flow->spec)) {
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
"The flow rule pattern is unsupported");
}
static int
-sfc_flow_parse(struct rte_eth_dev *dev,
- const struct rte_flow_attr *attr,
- const struct rte_flow_item pattern[],
- const struct rte_flow_action actions[],
- struct rte_flow *flow,
- struct rte_flow_error *error)
+sfc_flow_parse_rte_to_filter(struct rte_eth_dev *dev,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int rc;
- rc = sfc_flow_parse_attr(attr, flow, error);
- if (rc != 0)
- goto fail_bad_value;
-
rc = sfc_flow_parse_pattern(pattern, flow, error);
if (rc != 0)
goto fail_bad_value;
return rc;
}
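+/*
+ * Parse flow attributes first to select the specification type, then
+ * dispatch pattern and action parsing to the matching backend.
+ */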
+static int
+sfc_flow_parse(struct rte_eth_dev *dev,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_item pattern[],
+ const struct rte_flow_action actions[],
+ struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct sfc_flow_ops_by_spec *ops;
+ int rc;
+
+ rc = sfc_flow_parse_attr(attr, flow, error);
+ if (rc != 0)
+ return rc;
+
+ ops = sfc_flow_get_ops_by_spec(flow);
+ if (ops == NULL || ops->parse == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "No backend to handle this flow");
+ return -rte_errno;
+ }
+
+ return ops->parse(dev, pattern, actions, flow, error);
+}
+
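+/* Allocate a zeroed flow rule and set an rte_flow error on failure */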
+static struct rte_flow *
+sfc_flow_zmalloc(struct rte_flow_error *error)
+{
+ struct rte_flow *flow;
+
+ flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
+ if (flow == NULL) {
+ rte_flow_error_set(error, ENOMEM,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "Failed to allocate memory");
+ }
+
+ return flow;
+}
+
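+/* Release a flow rule allocated by sfc_flow_zmalloc() */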
+static void
+sfc_flow_free(__rte_unused struct sfc_adapter *sa, struct rte_flow *flow)
+{
+ rte_free(flow);
+}
+
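+/* Insert the flow rule via the backend matching its specification type */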
+static int
+sfc_flow_insert(struct sfc_adapter *sa, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct sfc_flow_ops_by_spec *ops;
+ int rc;
+
+ ops = sfc_flow_get_ops_by_spec(flow);
+ if (ops == NULL || ops->insert == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "No backend to handle this flow");
+ return rte_errno;
+ }
+
+ rc = ops->insert(sa, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to insert the flow rule");
+ }
+
+ return rc;
+}
+
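+/* Remove the flow rule via the backend matching its specification type */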
+static int
+sfc_flow_remove(struct sfc_adapter *sa, struct rte_flow *flow,
+ struct rte_flow_error *error)
+{
+ const struct sfc_flow_ops_by_spec *ops;
+ int rc;
+
+ ops = sfc_flow_get_ops_by_spec(flow);
+ if (ops == NULL || ops->remove == NULL) {
+ rte_flow_error_set(error, ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
+ "No backend to handle this flow");
+ return rte_errno;
+ }
+
+ rc = ops->remove(sa, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL, "Failed to remove the flow rule");
+ }
+
+ return rc;
+}
+
static int
sfc_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct rte_flow flow;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
+ struct rte_flow *flow;
+ int rc;
+
+ flow = sfc_flow_zmalloc(error);
+ if (flow == NULL)
+ return -rte_errno;
- memset(&flow, 0, sizeof(flow));
+ rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
+
+ sfc_flow_free(sa, flow);
- return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
+ return rc;
}
static struct rte_flow *
const struct rte_flow_action actions[],
struct rte_flow_error *error)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct rte_flow *flow = NULL;
int rc;
- flow = rte_zmalloc("sfc_rte_flow", sizeof(*flow), 0);
- if (flow == NULL) {
- rte_flow_error_set(error, ENOMEM,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to allocate memory");
+ flow = sfc_flow_zmalloc(error);
+ if (flow == NULL)
goto fail_no_mem;
- }
rc = sfc_flow_parse(dev, attr, pattern, actions, flow, error);
if (rc != 0)
goto fail_bad_value;
- TAILQ_INSERT_TAIL(&sa->filter.flow_list, flow, entries);
-
sfc_adapter_lock(sa);
+ TAILQ_INSERT_TAIL(&sa->flow_list, flow, entries);
+
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = sfc_flow_filter_insert(sa, flow);
- if (rc != 0) {
- rte_flow_error_set(error, rc,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to insert filter");
- goto fail_filter_insert;
- }
+ rc = sfc_flow_insert(sa, flow, error);
+ if (rc != 0)
+ goto fail_flow_insert;
}
sfc_adapter_unlock(sa);
return flow;
-fail_filter_insert:
- TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
+fail_flow_insert:
+ TAILQ_REMOVE(&sa->flow_list, flow, entries);
fail_bad_value:
- rte_free(flow);
+ sfc_flow_free(sa, flow);
sfc_adapter_unlock(sa);
fail_no_mem:
return NULL;
}
-static int
-sfc_flow_remove(struct sfc_adapter *sa,
- struct rte_flow *flow,
- struct rte_flow_error *error)
-{
- int rc = 0;
-
- SFC_ASSERT(sfc_adapter_is_locked(sa));
-
- if (sa->state == SFC_ADAPTER_STARTED) {
- rc = sfc_flow_filter_remove(sa, flow);
- if (rc != 0)
- rte_flow_error_set(error, rc,
- RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
- "Failed to destroy flow rule");
- }
-
- TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
- rte_free(flow);
-
- return rc;
-}
-
static int
sfc_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct rte_flow *flow_ptr;
int rc = EINVAL;
sfc_adapter_lock(sa);
- TAILQ_FOREACH(flow_ptr, &sa->filter.flow_list, entries) {
+ TAILQ_FOREACH(flow_ptr, &sa->flow_list, entries) {
if (flow_ptr == flow)
rc = 0;
}
goto fail_bad_value;
}
- rc = sfc_flow_remove(sa, flow, error);
+ if (sa->state == SFC_ADAPTER_STARTED)
+ rc = sfc_flow_remove(sa, flow, error);
+
+ TAILQ_REMOVE(&sa->flow_list, flow, entries);
+ sfc_flow_free(sa, flow);
fail_bad_value:
sfc_adapter_unlock(sa);
sfc_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
{
- struct sfc_adapter *sa = dev->data->dev_private;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
struct rte_flow *flow;
- int rc = 0;
int ret = 0;
sfc_adapter_lock(sa);
- while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
- rc = sfc_flow_remove(sa, flow, error);
- if (rc != 0)
- ret = rc;
+ while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
+ if (sa->state == SFC_ADAPTER_STARTED) {
+ int rc;
+
+ rc = sfc_flow_remove(sa, flow, error);
+ if (rc != 0)
+ ret = rc;
+ }
+
+ TAILQ_REMOVE(&sa->flow_list, flow, entries);
+ sfc_flow_free(sa, flow);
}
sfc_adapter_unlock(sa);
sfc_flow_isolate(struct rte_eth_dev *dev, int enable,
struct rte_flow_error *error)
{
- struct sfc_adapter *sa = dev->data->dev_private;
- struct sfc_port *port = &sa->port;
+ struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
int ret = 0;
sfc_adapter_lock(sa);
NULL, "please close the port first");
ret = -rte_errno;
} else {
- port->isolated = (enable) ? B_TRUE : B_FALSE;
+ sfc_sa2shared(sa)->isolated = (enable) ? B_TRUE : B_FALSE;
}
sfc_adapter_unlock(sa);
{
SFC_ASSERT(sfc_adapter_is_locked(sa));
- TAILQ_INIT(&sa->filter.flow_list);
+ TAILQ_INIT(&sa->flow_list);
}
void
SFC_ASSERT(sfc_adapter_is_locked(sa));
- while ((flow = TAILQ_FIRST(&sa->filter.flow_list)) != NULL) {
- TAILQ_REMOVE(&sa->filter.flow_list, flow, entries);
- rte_free(flow);
+ while ((flow = TAILQ_FIRST(&sa->flow_list)) != NULL) {
+ TAILQ_REMOVE(&sa->flow_list, flow, entries);
+ sfc_flow_free(sa, flow);
}
}
SFC_ASSERT(sfc_adapter_is_locked(sa));
- TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
- sfc_flow_filter_remove(sa, flow);
+ TAILQ_FOREACH(flow, &sa->flow_list, entries)
+ sfc_flow_remove(sa, flow, NULL);
}
int
SFC_ASSERT(sfc_adapter_is_locked(sa));
- TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
- rc = sfc_flow_filter_insert(sa, flow);
+ TAILQ_FOREACH(flow, &sa->flow_list, entries) {
+ rc = sfc_flow_insert(sa, flow, NULL);
if (rc != 0)
goto fail_bad_flow;
}