static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
+ __rte_unused struct rte_mempool *mb_pool,
unsigned int *rxq_entries,
unsigned int *evq_entries,
unsigned int *rxq_max_fill_level)
.type = SFC_DP_RX,
.hw_fw_caps = 0,
},
- .features = SFC_DP_RX_FEAT_SCATTER,
+ .features = SFC_DP_RX_FEAT_SCATTER |
+ SFC_DP_RX_FEAT_CHECKSUM,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
.qdestroy = sfc_efx_rx_qdestroy,
static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
- boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
+ struct sfc_rss *rss = &sa->rss;
+ boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
struct sfc_port *port = &sa->port;
int rc;
* repeat this step without promiscuous and all-multicast flags set
*/
retry:
- rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
+ rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
if (rc == 0)
return 0;
else if (rc != EOPNOTSUPP)
rxq_info = &sa->rxq_info[sw_index];
rxq = rxq_info->rxq;
+ SFC_ASSERT(rxq != NULL);
SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
evq = rxq->evq;
if (rc != 0)
goto fail_ev_qstart;
- rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
- &rxq->mem, rxq_info->entries,
- 0 /* not used on EF10 */, rxq_info->type_flags,
- evq->common, &rxq->common);
+ switch (rxq_info->type) {
+ case EFX_RXQ_TYPE_DEFAULT:
+ rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
+ &rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
+ rxq_info->type_flags, evq->common, &rxq->common);
+ break;
+ case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
+ struct rte_mempool *mp = rxq->refill_mb_pool;
+ struct rte_mempool_info mp_info;
+
+ rc = rte_mempool_ops_get_info(mp, &mp_info);
+ if (rc != 0) {
+ /* Positive errno is used in the driver */
+ rc = -rc;
+ goto fail_mp_get_info;
+ }
+ if (mp_info.contig_block_size <= 0) {
+ rc = EINVAL;
+ goto fail_bad_contig_block_size;
+ }
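+ /*
+ * Each Rx descriptor of an equal stride super-buffer RxQ refers to a
+ * contiguous block of mbufs; the buffer stride passed to the NIC is
+ * the full mempool element size including object header and trailer.
+ */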
+ rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
+ mp_info.contig_block_size, rxq->buf_size,
+ mp->header_size + mp->elt_size + mp->trailer_size,
+ sa->rxd_wait_timeout_ns,
+ &rxq->mem, rxq_info->entries, rxq_info->type_flags,
+ evq->common, &rxq->common);
+ break;
+ }
+ default:
+ rc = ENOTSUP;
+ }
if (rc != 0)
goto fail_rx_qcreate;
sfc_rx_qflush(sa, sw_index);
fail_rx_qcreate:
+fail_bad_contig_block_size:
+fail_mp_get_info:
sfc_ev_qstop(evq);
fail_ev_qstart:
rxq_info = &sa->rxq_info[sw_index];
rxq = rxq_info->rxq;
- if (rxq->state == SFC_RXQ_INITIALIZED)
+ if (rxq == NULL || rxq->state == SFC_RXQ_INITIALIZED)
return;
SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
uint64_t caps = 0;
caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- caps |= DEV_RX_OFFLOAD_CRC_STRIP;
- caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
- caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
- caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+
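+ /* Report checksum offloads only if the chosen Rx datapath supports them */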
+ if (sa->dp_rx->features & SFC_DP_RX_FEAT_CHECKSUM) {
+ caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
+ caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
+ caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
+ }
if (encp->enc_tunnel_encapsulations_supported &&
(sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
return caps;
}
-static void
-sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
- const char *verdict, uint64_t offloads)
-{
- unsigned long long bit;
-
- while ((bit = __builtin_ffsll(offloads)) != 0) {
- uint64_t flag = (1ULL << --bit);
-
- sfc_err(sa, "Rx %s offload %s %s", offload_group,
- rte_eth_dev_rx_offload_name(flag), verdict);
-
- offloads &= ~flag;
- }
-}
-
-static boolean_t
-sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
-{
- uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
- uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
- uint64_t rejected = requested & ~supported;
- uint64_t missing = (requested & mandatory) ^ mandatory;
- boolean_t mismatch = B_FALSE;
-
- if (rejected) {
- sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
- mismatch = B_TRUE;
- }
-
- if (missing) {
- sfc_rx_log_offloads(sa, "queue", "must be set", missing);
- mismatch = B_TRUE;
- }
-
- return mismatch;
-}
-
static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
- const struct rte_eth_rxconf *rx_conf)
+ const struct rte_eth_rxconf *rx_conf,
+ __rte_unused uint64_t offloads)
{
- uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
- sfc_rx_get_queue_offload_caps(sa);
int rc = 0;
if (rx_conf->rx_thresh.pthresh != 0 ||
rc = EINVAL;
}
- if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
- DEV_RX_OFFLOAD_CHECKSUM)
- sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
-
- if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
- sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
-
- if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
- rc = EINVAL;
-
return rc;
}
struct rte_mempool *mb_pool)
{
const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ struct sfc_rss *rss = &sa->rss;
int rc;
unsigned int rxq_entries;
unsigned int evq_entries;
unsigned int rxq_max_fill_level;
+ uint64_t offloads;
uint16_t buf_size;
struct sfc_rxq_info *rxq_info;
struct sfc_evq *evq;
struct sfc_rxq *rxq;
struct sfc_dp_rx_qcreate_info info;
- rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, &rxq_entries, &evq_entries,
- &rxq_max_fill_level);
+ rc = sa->dp_rx->qsize_up_rings(nb_rx_desc, mb_pool, &rxq_entries,
+ &evq_entries, &rxq_max_fill_level);
if (rc != 0)
goto fail_size_up_rings;
SFC_ASSERT(rxq_entries >= EFX_RXQ_MINNDESCS);
SFC_ASSERT(rxq_entries <= EFX_RXQ_MAXNDESCS);
- SFC_ASSERT(rxq_entries >= nb_rx_desc);
SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
- rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf);
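+ /* Per-queue Rx offloads are combined with device-level Rx offloads */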
+ offloads = rx_conf->offloads |
+ sa->eth_dev->data->dev_conf.rxmode.offloads;
+ rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
if (rc != 0)
goto fail_bad_conf;
}
if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
- (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
+ (~offloads & DEV_RX_OFFLOAD_SCATTER)) {
sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
"object size is too small", sw_index);
sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
rxq_info->entries = rxq_entries;
- rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
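+ /*
+ * An Rx datapath built on equal stride super-buffer firmware support
+ * dictates the matching RxQ type; otherwise the default type is used.
+ */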
+ if (sa->dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
+ rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
+ else
+ rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
+
rxq_info->type_flags =
- (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
+ (offloads & DEV_RX_OFFLOAD_SCATTER) ?
EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
if ((encp->enc_tunnel_encapsulations_supported != 0) &&
rxq->refill_threshold =
RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
rxq->refill_mb_pool = mb_pool;
+ rxq->buf_size = buf_size;
rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
socket_id, &rxq->mem);
info.batch_max = encp->enc_rx_batch_max;
info.prefix_size = encp->enc_rx_prefix_size;
- if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
+ if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0)
info.flags |= SFC_RXQ_FLAG_RSS_HASH;
info.rxq_entries = rxq_info->entries;
rte_free(rxq);
}
-efx_rx_hash_type_t
-sfc_rte_to_efx_hash_type(uint64_t rss_hf)
+/*
+ * Mapping between RTE RSS hash functions and their EFX counterparts.
+ */
+struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
+ { ETH_RSS_NONFRAG_IPV4_TCP,
+ EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV4_UDP,
+ EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX,
+ EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
+ { ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_UDP_EX,
+ EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
+ { ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER,
+ EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV4, 2TUPLE) },
+ { ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER |
+ ETH_RSS_IPV6_EX,
+ EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
+ EFX_RX_HASH(IPV6, 2TUPLE) }
+};
+
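+ /*
+ * Retain in the hash type only those class/tuple flag combinations
+ * which are reported as supported for the chosen hash algorithm.
+ */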
+static efx_rx_hash_type_t
+sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
+ unsigned int *hash_type_flags_supported,
+ unsigned int nb_hash_type_flags_supported)
+{
+ efx_rx_hash_type_t hash_type_masked = 0;
+ unsigned int i, j;
+
+ for (i = 0; i < nb_hash_type_flags_supported; ++i) {
+ unsigned int class_tuple_lbn[] = {
+ EFX_RX_CLASS_IPV4_TCP_LBN,
+ EFX_RX_CLASS_IPV4_UDP_LBN,
+ EFX_RX_CLASS_IPV4_LBN,
+ EFX_RX_CLASS_IPV6_TCP_LBN,
+ EFX_RX_CLASS_IPV6_UDP_LBN,
+ EFX_RX_CLASS_IPV6_LBN
+ };
+
+ for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
+ unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
+ unsigned int flag;
+
+ tuple_mask <<= class_tuple_lbn[j];
+ flag = hash_type & tuple_mask;
+
+ if (flag == hash_type_flags_supported[i])
+ hash_type_masked |= flag;
+ }
+ }
+
+ return hash_type_masked;
+}
+
+int
+sfc_rx_hash_init(struct sfc_adapter *sa)
{
- efx_rx_hash_type_t efx_hash_types = 0;
+ struct sfc_rss *rss = &sa->rss;
+ const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
+ uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
+ efx_rx_hash_alg_t alg;
+ unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
+ unsigned int nb_flags_supp;
+ struct sfc_rss_hf_rte_to_efx *hf_map;
+ struct sfc_rss_hf_rte_to_efx *entry;
+ efx_rx_hash_type_t efx_hash_types;
+ unsigned int i;
+ int rc;
- if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV4;
+ if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
+ alg = EFX_RX_HASHALG_TOEPLITZ;
+ else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
+ alg = EFX_RX_HASHALG_PACKED_STREAM;
+ else
+ return EINVAL;
- if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV4;
+ rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
+ RTE_DIM(flags_supp), &nb_flags_supp);
+ if (rc != 0)
+ return rc;
- if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_IPV6;
+ hf_map = rte_calloc_socket("sfc-rss-hf-map",
+ RTE_DIM(sfc_rss_hf_map),
+ sizeof(*hf_map), 0, sa->socket_id);
+ if (hf_map == NULL)
+ return ENOMEM;
+
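+ /*
+ * Fill the per-adapter map with only those entries whose EFX hash
+ * flags are supported; the union of these flags is remembered as the
+ * default set of hash types.
+ */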
+ entry = hf_map;
+ efx_hash_types = 0;
+ for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
+ efx_rx_hash_type_t ht;
+
+ ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
+ flags_supp, nb_flags_supp);
+ if (ht != 0) {
+ entry->rte = sfc_rss_hf_map[i].rte;
+ entry->efx = ht;
+ efx_hash_types |= ht;
+ ++entry;
+ }
+ }
- if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
- efx_hash_types |= EFX_RX_HASH_TCPIPV6;
+ rss->hash_alg = alg;
+ rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
+ rss->hf_map = hf_map;
+ rss->hash_types = efx_hash_types;
- return efx_hash_types;
+ return 0;
}
-uint64_t
-sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
+void
+sfc_rx_hash_fini(struct sfc_adapter *sa)
{
- uint64_t rss_hf = 0;
+ struct sfc_rss *rss = &sa->rss;
+
+ rte_free(rss->hf_map);
+}
+
+int
+sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
+ efx_rx_hash_type_t *efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t hash_types = 0;
+ unsigned int i;
+
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ uint64_t rte_mask = rss->hf_map[i].rte;
+
+ if ((rte & rte_mask) != 0) {
+ rte &= ~rte_mask;
+ hash_types |= rss->hf_map[i].efx;
+ }
+ }
+
+ if (rte != 0) {
+ sfc_err(sa, "unsupported hash functions requested");
+ return EINVAL;
+ }
- if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
- rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
- ETH_RSS_NONFRAG_IPV4_OTHER);
+ *efx = hash_types;
+
+ return 0;
+}
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+uint64_t
+sfc_rx_hf_efx_to_rte(struct sfc_adapter *sa, efx_rx_hash_type_t efx)
+{
+ struct sfc_rss *rss = &sa->rss;
+ uint64_t rte = 0;
+ unsigned int i;
- if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
- rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
- ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);
+ for (i = 0; i < rss->hf_map_nb_entries; ++i) {
+ efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
- if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
- rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);
+ if ((efx & hash_type) == hash_type)
+ rte |= rss->hf_map[i].rte;
+ }
- return rss_hf;
+ return rte;
}
static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
struct rte_eth_rss_conf *conf)
{
- efx_rx_hash_type_t efx_hash_types = sa->rss_hash_types;
+ struct sfc_rss *rss = &sa->rss;
+ efx_rx_hash_type_t efx_hash_types = rss->hash_types;
+ uint64_t rss_hf = sfc_rx_hf_efx_to_rte(sa, efx_hash_types);
+ int rc;
- if (sa->rss_support != EFX_RX_SCALE_EXCLUSIVE) {
- if ((conf->rss_hf != 0 && conf->rss_hf != SFC_RSS_OFFLOADS) ||
+ if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
+ if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
conf->rss_key != NULL)
return EINVAL;
}
if (conf->rss_hf != 0) {
- if ((conf->rss_hf & ~SFC_RSS_OFFLOADS) != 0) {
- sfc_err(sa, "unsupported hash functions requested");
- return EINVAL;
- }
-
- efx_hash_types = sfc_rte_to_efx_hash_type(conf->rss_hf);
+ rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
+ if (rc != 0)
+ return rc;
}
if (conf->rss_key != NULL) {
- if (conf->rss_key_len != sizeof(sa->rss_key)) {
+ if (conf->rss_key_len != sizeof(rss->key)) {
sfc_err(sa, "RSS key size is wrong (should be %lu)",
- sizeof(sa->rss_key));
+ sizeof(rss->key));
return EINVAL;
}
- rte_memcpy(sa->rss_key, conf->rss_key, sizeof(sa->rss_key));
+ rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
}
- sa->rss_hash_types = efx_hash_types;
+ rss->hash_types = efx_hash_types;
return 0;
}
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- EFX_RX_HASHALG_TOEPLITZ,
- sa->rss_hash_types, B_TRUE);
+ rss->hash_alg, rss->hash_types,
+ B_TRUE);
if (rc != 0)
goto finish;
rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_key,
- sizeof(sa->rss_key));
+ rss->key, sizeof(rss->key));
if (rc != 0)
goto finish;
rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
- sa->rss_tbl, RTE_DIM(sa->rss_tbl));
+ rss->tbl, RTE_DIM(rss->tbl));
}
finish:
goto fail_rss_config;
for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
- if ((!sa->rxq_info[sw_index].deferred_start ||
+ if (sa->rxq_info[sw_index].rxq != NULL &&
+ (!sa->rxq_info[sw_index].deferred_start ||
sa->rxq_info[sw_index].deferred_started)) {
rc = sfc_rx_qstart(sa, sw_index);
if (rc != 0)
{
uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
sfc_rx_get_queue_offload_caps(sa);
- uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
+ struct sfc_rss *rss = &sa->rss;
int rc = 0;
switch (rxmode->mq_mode) {
/* No special checks are required */
break;
case ETH_MQ_RX_RSS:
- if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
+ if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
sfc_err(sa, "RSS is not available");
rc = EINVAL;
}
rc = EINVAL;
}
- if (offloads_rejected) {
- sfc_rx_log_offloads(sa, "device", "is unsupported",
- offloads_rejected);
- rc = EINVAL;
+ /*
+ * Requested offloads have already been validated by the ethdev layer
+ * against the supported capabilities, so the check below cannot
+ * enable an unsupported offload.
+ */
+ if ((rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+ (offloads_supported & DEV_RX_OFFLOAD_CHECKSUM)) {
+ sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+ rxmode->offloads |= DEV_RX_OFFLOAD_CHECKSUM;
}
- if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
- sfc_warn(sa, "FCS stripping cannot be disabled - always on");
- rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- rxmode->hw_strip_crc = 1;
+ if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
+ sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+ rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
return rc;
int
sfc_rx_configure(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
int rc;
sa->rxq_count++;
}
- sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
- MIN(sa->rxq_count, EFX_MAXRSS) : 0;
+ rss->channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
+ MIN(sa->rxq_count, EFX_MAXRSS) : 0;
- if (sa->rss_channels > 0) {
+ if (rss->channels > 0) {
struct rte_eth_rss_conf *adv_conf_rss;
unsigned int sw_index;
for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
- sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
+ rss->tbl[sw_index] = sw_index % rss->channels;
adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
void
sfc_rx_close(struct sfc_adapter *sa)
{
+ struct sfc_rss *rss = &sa->rss;
+
sfc_rx_fini_queues(sa, 0);
- sa->rss_channels = 0;
+ rss->channels = 0;
rte_free(sa->rxq_info);
sa->rxq_info = NULL;