-/*-
- * BSD LICENSE
+/* SPDX-License-Identifier: BSD-3-Clause
*
- * Copyright (c) 2017 Solarflare Communications Inc.
+ * Copyright (c) 2017-2018 Solarflare Communications Inc.
* All rights reserved.
*
* This software was jointly developed between OKTET Labs (under contract
* for Solarflare) and Solarflare Communications, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
- * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <rte_tailq.h>
#include <rte_common.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_eth_ctrl.h>
#include <rte_ether.h>
#include <rte_flow.h>
static sfc_flow_item_parse sfc_flow_parse_ipv6;
static sfc_flow_item_parse sfc_flow_parse_tcp;
static sfc_flow_item_parse sfc_flow_parse_udp;
+static sfc_flow_item_parse sfc_flow_parse_vxlan;
+static sfc_flow_item_parse sfc_flow_parse_geneve;
+static sfc_flow_item_parse sfc_flow_parse_nvgre;
static boolean_t
sfc_flow_is_zero(const uint8_t *buf, unsigned int size)
return -rte_errno;
}
- mask = (const uint8_t *)def_mask;
+ mask = def_mask;
} else {
- mask = (const uint8_t *)item->mask;
+ mask = item->mask;
}
- spec = (const uint8_t *)item->spec;
- last = (const uint8_t *)item->last;
+ spec = item->spec;
+ last = item->last;
if (spec == NULL)
goto exit;
* Convert Ethernet item to EFX filter specification.
*
* @param item[in]
- * Item specification. Only source and destination addresses and
- * Ethernet type fields are supported. In addition to full and
- * empty masks of destination address, individual/group mask is
- * also supported. If the mask is NULL, default mask will be used.
- * Ranging is not supported.
+ * Item specification. The outer frame specification may comprise
+ * only the source/destination addresses and the EtherType field.
+ * The inner frame specification may contain the destination address only.
+ * For the destination address, the individual/group mask is supported
+ * in addition to the empty and full masks.
+ * If the mask is NULL, the default mask will be used. Ranging is not supported.
* @param efx_spec[in, out]
* EFX filter specification to update.
* @param[out] error
.src.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.type = 0xffff,
};
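+ /* Mask with only the destination MAC address supported in the inner frame */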
+ const struct rte_flow_item_eth ifrm_supp_mask = {
+ .dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ };
const uint8_t ig_mask[EFX_MAC_ADDR_LEN] = {
0x01, 0x00, 0x00, 0x00, 0x00, 0x00
};
+ const struct rte_flow_item_eth *supp_mask_p;
+ const struct rte_flow_item_eth *def_mask_p;
+ uint8_t *loc_mac = NULL;
+ boolean_t is_ifrm = (efx_spec->efs_encap_type !=
+ EFX_TUNNEL_PROTOCOL_NONE);
+
+ if (is_ifrm) {
+ supp_mask_p = &ifrm_supp_mask;
+ def_mask_p = &ifrm_supp_mask;
+ loc_mac = efx_spec->efs_ifrm_loc_mac;
+ } else {
+ supp_mask_p = &supp_mask;
+ def_mask_p = &rte_flow_item_eth_mask;
+ loc_mac = efx_spec->efs_loc_mac;
+ }
rc = sfc_flow_parse_init(item,
(const void **)&spec,
(const void **)&mask,
- &supp_mask,
- &rte_flow_item_eth_mask,
+ supp_mask_p, def_mask_p,
sizeof(struct rte_flow_item_eth),
error);
if (rc != 0)
return rc;
- /* If "spec" is not set, could be any Ethernet */
- if (spec == NULL)
- return 0;
+ /*
+ * If "spec" is not set, the flow could match any Ethernet frame;
+ * however, for the inner frame a match on the destination MAC
+ * address must be specified
+ */
+ if (spec == NULL) {
+ if (is_ifrm)
+ goto fail_bad_ifrm_dst_mac;
+ else
+ return 0;
+ }
if (is_same_ether_addr(&mask->dst, &supp_mask.dst)) {
- efx_spec->efs_match_flags |= EFX_FILTER_MATCH_LOC_MAC;
- rte_memcpy(efx_spec->efs_loc_mac, spec->dst.addr_bytes,
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_LOC_MAC :
+ EFX_FILTER_MATCH_LOC_MAC;
+ rte_memcpy(loc_mac, spec->dst.addr_bytes,
EFX_MAC_ADDR_LEN);
} else if (memcmp(mask->dst.addr_bytes, ig_mask,
EFX_MAC_ADDR_LEN) == 0) {
if (is_unicast_ether_addr(&spec->dst))
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_UCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_UCAST_DST;
else
- efx_spec->efs_match_flags |=
+ efx_spec->efs_match_flags |= is_ifrm ?
+ EFX_FILTER_MATCH_IFRM_UNKNOWN_MCAST_DST :
EFX_FILTER_MATCH_UNKNOWN_MCAST_DST;
} else if (!is_zero_ether_addr(&mask->dst)) {
goto fail_bad_mask;
+ } else if (is_ifrm) {
+ goto fail_bad_ifrm_dst_mac;
}
+ /*
+ * ifrm_supp_mask ensures that the source address and
+ * EtherType masks are zero for the inner frame,
+ * so these fields are filled in only for the outer frame
+ */
if (is_same_ether_addr(&mask->src, &supp_mask.src)) {
efx_spec->efs_match_flags |= EFX_FILTER_MATCH_REM_MAC;
rte_memcpy(efx_spec->efs_rem_mac, spec->src.addr_bytes,
RTE_FLOW_ERROR_TYPE_ITEM, item,
"Bad mask in the ETH pattern item");
return -rte_errno;
+
+fail_bad_ifrm_dst_mac:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Type of destination MAC address in inner frame "
+ "must be set");
+ return -rte_errno;
}
/**
return -rte_errno;
}
+/*
+ * Filters for encapsulated packets match based on the EtherType and IP
+ * protocol in the outer frame.
+ */
+static int
+sfc_flow_set_match_flags_for_encap_pkts(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ uint8_t ip_proto,
+ struct rte_flow_error *error)
+{
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_IP_PROTO)) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_IP_PROTO;
+ efx_spec->efs_ip_proto = ip_proto;
+ } else if (efx_spec->efs_ip_proto != ip_proto) {
+ switch (ip_proto) {
+ case EFX_IPPROTO_UDP:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be UDP "
+ "in VxLAN/GENEVE pattern");
+ return -rte_errno;
+
+ case EFX_IPPROTO_GRE:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer IP header protocol must be GRE "
+ "in NVGRE pattern");
+ return -rte_errno;
+
+ default:
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Only VxLAN/GENEVE/NVGRE tunneling patterns "
+ "are supported");
+ return -rte_errno;
+ }
+ }
+
+ if (!(efx_spec->efs_match_flags & EFX_FILTER_MATCH_ETHER_TYPE)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer frame EtherType in pattern with tunneling "
+ "must be set");
+ return -rte_errno;
+ } else if (efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV4 &&
+ efx_spec->efs_ether_type != EFX_ETHER_TYPE_IPV6) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Outer frame EtherType in pattern with tunneling "
+ "must be IPv4 or IPv6");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
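+/*
+ * An auxiliary routine to copy a fully masked VNI/VSID value into the
+ * EFX filter specification; only the full and empty masks are accepted
+ */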
+static int
+sfc_flow_set_efx_spec_vni_or_vsid(efx_filter_spec_t *efx_spec,
+ const uint8_t *vni_or_vsid_val,
+ const uint8_t *vni_or_vsid_mask,
+ const struct rte_flow_item *item,
+ struct rte_flow_error *error)
+{
+ const uint8_t vni_or_vsid_full_mask[EFX_VNI_OR_VSID_LEN] = {
+ 0xff, 0xff, 0xff
+ };
+
+ if (memcmp(vni_or_vsid_mask, vni_or_vsid_full_mask,
+ EFX_VNI_OR_VSID_LEN) == 0) {
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_VNI_OR_VSID;
+ rte_memcpy(efx_spec->efs_vni_or_vsid, vni_or_vsid_val,
+ EFX_VNI_OR_VSID_LEN);
+ } else if (!sfc_flow_is_zero(vni_or_vsid_mask, EFX_VNI_OR_VSID_LEN)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported VNI/VSID mask");
+ return -rte_errno;
+ }
+
+ return 0;
+}
+
+/**
+ * Convert VXLAN item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only VXLAN network identifier field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_vxlan(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_vxlan *spec = NULL;
+ const struct rte_flow_item_vxlan *mask = NULL;
+ const struct rte_flow_item_vxlan supp_mask = {
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_vxlan_mask,
+ sizeof(struct rte_flow_item_vxlan),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_VXLAN;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert GENEVE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only the Virtual Network Identifier and protocol type
+ * fields are supported; the protocol type may only be Ethernet (0x6558).
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_geneve(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_geneve *spec = NULL;
+ const struct rte_flow_item_geneve *mask = NULL;
+ const struct rte_flow_item_geneve supp_mask = {
+ .protocol = RTE_BE16(0xffff),
+ .vni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_geneve_mask,
+ sizeof(struct rte_flow_item_geneve),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_UDP, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_GENEVE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ if (mask->protocol == supp_mask.protocol) {
+ if (spec->protocol != rte_cpu_to_be_16(ETHER_TYPE_TEB)) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "GENEVE encap. protocol must be Ethernet "
+ "(0x6558) in the GENEVE pattern item");
+ return -rte_errno;
+ }
+ } else if (mask->protocol != 0) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM, item,
+ "Unsupported mask for GENEVE encap. protocol");
+ return -rte_errno;
+ }
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->vni,
+ mask->vni, item, error);
+
+ return rc;
+}
+
+/**
+ * Convert NVGRE item to EFX filter specification.
+ *
+ * @param item[in]
+ * Item specification. Only virtual subnet ID field is supported.
+ * If the mask is NULL, default mask will be used.
+ * Ranging is not supported.
+ * @param efx_spec[in, out]
+ * EFX filter specification to update.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ */
+static int
+sfc_flow_parse_nvgre(const struct rte_flow_item *item,
+ efx_filter_spec_t *efx_spec,
+ struct rte_flow_error *error)
+{
+ int rc;
+ const struct rte_flow_item_nvgre *spec = NULL;
+ const struct rte_flow_item_nvgre *mask = NULL;
+ const struct rte_flow_item_nvgre supp_mask = {
+ .tni = { 0xff, 0xff, 0xff }
+ };
+
+ rc = sfc_flow_parse_init(item,
+ (const void **)&spec,
+ (const void **)&mask,
+ &supp_mask,
+ &rte_flow_item_nvgre_mask,
+ sizeof(struct rte_flow_item_nvgre),
+ error);
+ if (rc != 0)
+ return rc;
+
+ rc = sfc_flow_set_match_flags_for_encap_pkts(item, efx_spec,
+ EFX_IPPROTO_GRE, error);
+ if (rc != 0)
+ return rc;
+
+ efx_spec->efs_encap_type = EFX_TUNNEL_PROTOCOL_NVGRE;
+ efx_spec->efs_match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+
+ if (spec == NULL)
+ return 0;
+
+ rc = sfc_flow_set_efx_spec_vni_or_vsid(efx_spec, spec->tni,
+ mask->tni, item, error);
+
+ return rc;
+}
+
static const struct sfc_flow_item sfc_flow_items[] = {
{
.type = RTE_FLOW_ITEM_TYPE_VOID,
.layer = SFC_FLOW_ITEM_L4,
.parse = sfc_flow_parse_udp,
},
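+ /*
+ * A tunnel item resets the layer to START_LAYER since the items
+ * that follow it describe the inner frame. NVGRE follows an L3
+ * item directly because it does not use a UDP transport.
+ */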
+ {
+ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_vxlan,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_GENEVE,
+ .prev_layer = SFC_FLOW_ITEM_L4,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_geneve,
+ },
+ {
+ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
+ .prev_layer = SFC_FLOW_ITEM_L3,
+ .layer = SFC_FLOW_ITEM_START_LAYER,
+ .parse = sfc_flow_parse_nvgre,
+ },
};
/*
}
flow->spec.efs_flags |= EFX_FILTER_FLAG_RX;
- flow->spec.efs_rss_context = EFX_FILTER_SPEC_RSS_CONTEXT_DEFAULT;
+ flow->spec.efs_rss_context = EFX_RSS_CONTEXT_DEFAULT;
return 0;
}
{
int rc;
unsigned int prev_layer = SFC_FLOW_ITEM_ANY_LAYER;
+ boolean_t is_ifrm = B_FALSE;
const struct sfc_flow_item *item;
if (pattern == NULL) {
return -rte_errno;
}
+ /*
+ * Allow only VOID and ETH pattern items in the inner frame.
+ * Also check that there is only one tunneling protocol.
+ */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_VOID:
+ case RTE_FLOW_ITEM_TYPE_ETH:
+ break;
+
+ case RTE_FLOW_ITEM_TYPE_VXLAN:
+ case RTE_FLOW_ITEM_TYPE_GENEVE:
+ case RTE_FLOW_ITEM_TYPE_NVGRE:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "More than one tunneling protocol");
+ return -rte_errno;
+ }
+ is_ifrm = B_TRUE;
+ break;
+
+ default:
+ if (is_ifrm) {
+ rte_flow_error_set(error, EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ pattern,
+ "There is an unsupported pattern item "
+ "in the inner frame");
+ return -rte_errno;
+ }
+ break;
+ }
+
rc = item->parse(pattern, &flow->spec, error);
if (rc != 0)
return rc;
return 0;
}
+#if EFSYS_OPT_RX_SCALE
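+/*
+ * Validate the queues, hash types and key of the RSS action
+ * configuration and, on success, fill in the flow RSS configuration
+ */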
+static int
+sfc_flow_parse_rss(struct sfc_adapter *sa,
+ const struct rte_flow_action_rss *rss,
+ struct rte_flow *flow)
+{
+ unsigned int rxq_sw_index;
+ struct sfc_rxq *rxq;
+ unsigned int rxq_hw_index_min;
+ unsigned int rxq_hw_index_max;
+ const struct rte_eth_rss_conf *rss_conf = rss->rss_conf;
+ uint64_t rss_hf;
+ uint8_t *rss_key = NULL;
+ struct sfc_flow_rss *sfc_rss_conf = &flow->rss_conf;
+ unsigned int i;
+
+ if (rss->num == 0)
+ return -EINVAL;
+
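+ /*
+ * Seed the minimum with the HW index of an arbitrary (the last)
+ * Rx queue and the maximum with 0; the loop below computes the
+ * true bounds over the queues listed in the action
+ */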
+ rxq_sw_index = sa->rxq_count - 1;
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+ rxq_hw_index_min = rxq->hw_index;
+ rxq_hw_index_max = 0;
+
+ for (i = 0; i < rss->num; ++i) {
+ rxq_sw_index = rss->queue[i];
+
+ if (rxq_sw_index >= sa->rxq_count)
+ return -EINVAL;
+
+ rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ if (rxq->hw_index < rxq_hw_index_min)
+ rxq_hw_index_min = rxq->hw_index;
+
+ if (rxq->hw_index > rxq_hw_index_max)
+ rxq_hw_index_max = rxq->hw_index;
+ }
+
+ rss_hf = (rss_conf != NULL) ? rss_conf->rss_hf : SFC_RSS_OFFLOADS;
+ if ((rss_hf & ~SFC_RSS_OFFLOADS) != 0)
+ return -EINVAL;
+
+ if (rss_conf != NULL) {
+ if (rss_conf->rss_key_len != sizeof(sa->rss_key))
+ return -EINVAL;
+
+ rss_key = rss_conf->rss_key;
+ } else {
+ rss_key = sa->rss_key;
+ }
+
+ flow->rss = B_TRUE;
+
+ sfc_rss_conf->rxq_hw_index_min = rxq_hw_index_min;
+ sfc_rss_conf->rxq_hw_index_max = rxq_hw_index_max;
+ sfc_rss_conf->rss_hash_types = sfc_rte_to_efx_hash_type(rss_hf);
+ rte_memcpy(sfc_rss_conf->rss_key, rss_key, sizeof(sa->rss_key));
+
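+ /*
+ * Fill in the indirection table with offsets relative to the
+ * base (minimum) HW Rx queue index, spreading the listed
+ * queues over the table round-robin
+ */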
+ for (i = 0; i < RTE_DIM(sfc_rss_conf->rss_tbl); ++i) {
+ unsigned int rxq_sw_index = rss->queue[i % rss->num];
+ struct sfc_rxq *rxq = sa->rxq_info[rxq_sw_index].rxq;
+
+ sfc_rss_conf->rss_tbl[i] = rxq->hw_index - rxq_hw_index_min;
+ }
+
+ return 0;
+}
+#endif /* EFSYS_OPT_RX_SCALE */
+
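+/*
+ * Insert the filter into the HW; if the flow uses RSS, an exclusive
+ * RSS context is allocated, configured and attached to the filter first
+ */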
+static int
+sfc_flow_filter_insert(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ efx_filter_spec_t *spec = &flow->spec;
+
+#if EFSYS_OPT_RX_SCALE
+ struct sfc_flow_rss *rss = &flow->rss_conf;
+ int rc = 0;
+
+ if (flow->rss) {
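+ /* Span of HW Rx queue indices involved, capped at EFX_MAXRSS */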
+ unsigned int rss_spread = MIN(rss->rxq_hw_index_max -
+ rss->rxq_hw_index_min + 1,
+ EFX_MAXRSS);
+
+ rc = efx_rx_scale_context_alloc(sa->nic,
+ EFX_RX_SCALE_EXCLUSIVE,
+ rss_spread,
+ &spec->efs_rss_context);
+ if (rc != 0)
+ goto fail_scale_context_alloc;
+
+ rc = efx_rx_scale_mode_set(sa->nic, spec->efs_rss_context,
+ EFX_RX_HASHALG_TOEPLITZ,
+ rss->rss_hash_types, B_TRUE);
+ if (rc != 0)
+ goto fail_scale_mode_set;
+
+ rc = efx_rx_scale_key_set(sa->nic, spec->efs_rss_context,
+ rss->rss_key,
+ sizeof(sa->rss_key));
+ if (rc != 0)
+ goto fail_scale_key_set;
+
+ spec->efs_dmaq_id = rss->rxq_hw_index_min;
+ spec->efs_flags |= EFX_FILTER_FLAG_RX_RSS;
+ }
+
+ rc = efx_filter_insert(sa->nic, spec);
+ if (rc != 0)
+ goto fail_filter_insert;
+
+ if (flow->rss) {
+ /*
+ * The scale table is set after filter insertion because
+ * the table entries are relative to the base RxQ ID,
+ * and the latter is submitted to the HW by means of
+ * inserting a filter. By the time of this request the
+ * HW therefore knows everything needed to verify the
+ * table entries, and the operation succeeds
+ */
+ rc = efx_rx_scale_tbl_set(sa->nic, spec->efs_rss_context,
+ rss->rss_tbl, RTE_DIM(rss->rss_tbl));
+ if (rc != 0)
+ goto fail_scale_tbl_set;
+ }
+
+ return 0;
+
+fail_scale_tbl_set:
+ efx_filter_remove(sa->nic, spec);
+
+fail_filter_insert:
+fail_scale_key_set:
+fail_scale_mode_set:
+ if (flow->rss)
+ efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+
+fail_scale_context_alloc:
+ return rc;
+#else /* !EFSYS_OPT_RX_SCALE */
+ return efx_filter_insert(sa->nic, spec);
+#endif /* EFSYS_OPT_RX_SCALE */
+}
+
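+/* Remove the filter from the HW and free its RSS context, if any */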
+static int
+sfc_flow_filter_remove(struct sfc_adapter *sa,
+ struct rte_flow *flow)
+{
+ efx_filter_spec_t *spec = &flow->spec;
+ int rc = 0;
+
+ rc = efx_filter_remove(sa->nic, spec);
+ if (rc != 0)
+ return rc;
+
+#if EFSYS_OPT_RX_SCALE
+ if (flow->rss)
+ rc = efx_rx_scale_context_free(sa->nic, spec->efs_rss_context);
+#endif /* EFSYS_OPT_RX_SCALE */
+
+ return rc;
+}
+
static int
sfc_flow_parse_actions(struct sfc_adapter *sa,
const struct rte_flow_action actions[],
is_specified = B_TRUE;
break;
+#if EFSYS_OPT_RX_SCALE
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ rc = sfc_flow_parse_rss(sa, actions->conf, flow);
+ if (rc != 0) {
+ rte_flow_error_set(error, rc,
+ RTE_FLOW_ERROR_TYPE_ACTION, actions,
+ "Bad RSS action");
+ return -rte_errno;
+ }
+
+ is_specified = B_TRUE;
+ break;
+#endif /* EFSYS_OPT_RX_SCALE */
+
default:
rte_flow_error_set(error, ENOTSUP,
RTE_FLOW_ERROR_TYPE_ACTION, actions,
struct sfc_adapter *sa = dev->data->dev_private;
int rc;
- memset(&flow->spec, 0, sizeof(flow->spec));
-
rc = sfc_flow_parse_attr(attr, flow, error);
if (rc != 0)
goto fail_bad_value;
{
struct rte_flow flow;
+ memset(&flow, 0, sizeof(flow));
+
return sfc_flow_parse(dev, attr, pattern, actions, &flow, error);
}
sfc_adapter_lock(sa);
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_filter_insert(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_insert(sa, flow);
if (rc != 0) {
rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
SFC_ASSERT(sfc_adapter_is_locked(sa));
if (sa->state == SFC_ADAPTER_STARTED) {
- rc = efx_filter_remove(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_remove(sa, flow);
if (rc != 0)
rte_flow_error_set(error, rc,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->filter.flow_list, entries)
- efx_filter_remove(sa->nic, &flow->spec);
+ sfc_flow_filter_remove(sa, flow);
}
int
SFC_ASSERT(sfc_adapter_is_locked(sa));
TAILQ_FOREACH(flow, &sa->filter.flow_list, entries) {
- rc = efx_filter_insert(sa->nic, &flow->spec);
+ rc = sfc_flow_filter_insert(sa, flow);
if (rc != 0)
goto fail_bad_flow;
}