DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_SCATTER)
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_RSS_HASH)
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
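Note: every hunk in this series follows the same two-part pattern. DEV_RX_OFFLOAD_RSS_HASH is added to the PMD's advertised Rx offload capabilities, and drivers that always compute the hash anyway force the flag on in their configure path, so existing applications keep receiving hashes unchanged. On the receive side the offload gates mbuf::hash.rss; a minimal consumption sketch (use_hash() is a placeholder, and the port is assumed configured with ETH_MQ_RX_RSS plus this offload):

    /* Sketch: the PMD sets PKT_RX_RSS_HASH in ol_flags only when
     * it has written a valid value into hash.rss. */
    if (m->ol_flags & PKT_RX_RSS_HASH)
        use_hash(m->hash.rss);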
bp->rx_cp_nr_rings = bp->rx_nr_rings;
bp->tx_cp_nr_rings = bp->tx_nr_rings;
+ rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+ eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
+
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
eth_dev->data->mtu =
eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_SCATTER)
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_RSS_HASH)
/* Common PF and VF devargs */
CXGBE_FUNC_TRACE();
+ eth_dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
if (err)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH;
/* Supported Tx offloads */
static uint64_t dev_tx_offloads_sup =
/* Rx offloads which cannot be disabled */
static uint64_t dev_rx_offloads_nodis =
+ DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
/* Supported Tx offloads */
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* multiple queue mode checking */
ret = igb_check_mq_mode(dev);
if (ret != 0) {
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
return rx_offload_capa;
}
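The capability hunks (here and in the other drivers) let applications probe for the offload before requesting it. A hedged application-side sketch, assuming port_id and port_conf are already set up elsewhere:

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_RSS_HASH)
        port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;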
return ret;
}
+ eth_dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
enic->mc_count = 0;
enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_CHECKSUM);
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_CKSUM;
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH;
enic->tx_offload_mask =
PKT_TX_IPV6 |
PKT_TX_IPV4 |
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* multiple queue mode checking */
ret = fm10k_check_mq_mode(dev);
if (ret != 0) {
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_HEADER_SPLIT);
+ DEV_RX_OFFLOAD_HEADER_SPLIT |
+ DEV_RX_OFFLOAD_RSS_HASH);
}
static int
return -EINVAL;
}
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* mtu size is 256~9600 */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
dev->data->dev_conf.rxmode.max_rx_pkt_len >
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_TCP_LRO;
+ DEV_RX_OFFLOAD_TCP_LRO |
+ DEV_RX_OFFLOAD_RSS_HASH;
info->tx_queue_offload_capa = 0;
info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
ad->tx_simple_allowed = true;
ad->tx_vec_allowed = true;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Only legacy filter API needs the following fdir config. So when the
* legacy filter API is deprecated, the following codes should also be
* removed.
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
ad->rx_vec_allowed = true;
ad->tx_vec_allowed = true;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Vlan stripping setting */
if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN) {
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_VLAN_FILTER;
+ DEV_RX_OFFLOAD_VLAN_FILTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
DEV_TX_OFFLOAD_VLAN_INSERT |
DEV_TX_OFFLOAD_QINQ_INSERT |
ad->rx_bulk_alloc_allowed = true;
ad->tx_simple_allowed = true;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
return 0;
}
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_QINQ_STRIP |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_RX_OFFLOAD_VLAN_EXTEND;
+ DEV_RX_OFFLOAD_VLAN_EXTEND |
+ DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa |=
DEV_TX_OFFLOAD_QINQ_INSERT |
DEV_TX_OFFLOAD_IPV4_CKSUM |
int ret;
PMD_INIT_FUNC_TRACE();
+
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* multiple queue mode checking */
ret = ixgbe_check_mq_mode(dev);
if (ret != 0) {
PMD_INIT_LOG(DEBUG, "Configured Virtual Function port id: %d",
dev->data->port_id);
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/*
* VF has no ability to enable/disable HW CRC
* Keep the persistent behavior the same as Host PF
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_SCATTER;
+ DEV_RX_OFFLOAD_SCATTER |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hw->mac.type == ixgbe_mac_82598EB)
offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_RSS_HASH);
devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
DEV_TX_OFFLOAD_UDP_CKSUM |
DEV_TX_OFFLOAD_TCP_CKSUM |
PMD_INIT_FUNC_TRACE();
+ eth_dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Inform firmware about change in number of queues to use.
* Disable IO queues and reset registers for re-configuration.
*/
struct rte_flow_error error;
int ret;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Prepare internal flow rules. */
ret = mlx4_flow_sync(priv, &error);
if (ret) {
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)
offloads |= DEV_RX_OFFLOAD_CHECKSUM;
rte_errno = ENOMEM;
return -rte_errno;
}
+
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
memcpy(priv->rss_conf.rss_key,
use_app_rss_key ?
dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key :
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME);
+ DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH);
if (config->hw_fcs_strip)
offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
PMD_INIT_FUNC_TRACE();
+ dev_conf->rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
unsupported = txmode->offloads & ~HN_TX_OFFLOAD_CAPS;
if (unsupported) {
PMD_DRV_LOG(NOTICE,
== HN_NDIS_LSOV2_CAP_IP6)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hwcaps.ndis_csum.ndis_ip4_rxcsum & NDIS_RXCSUM_CAP_IP4)
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM;
rxmode = &dev_conf->rxmode;
txmode = &dev_conf->txmode;
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* Checking TX mode */
if (txmode->mq_mode) {
PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME |
+ DEV_RX_OFFLOAD_RSS_HASH;
if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;
struct rte_eth_rxmode *rxmode = &conf->rxmode;
uint16_t flags = 0;
- if (rxmode->mq_mode == ETH_MQ_RX_RSS)
+ if (rxmode->mq_mode == ETH_MQ_RX_RSS &&
+ (dev->rx_offloads & DEV_RX_OFFLOAD_RSS_HASH))
flags |= NIX_RX_OFFLOAD_RSS_F;
if (dev->rx_offloads & (DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_MT_LOCKFREE | \
DEV_TX_OFFLOAD_VLAN_INSERT | \
DEV_TX_OFFLOAD_QINQ_INSERT | \
- DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
- DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | \
+ DEV_TX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_TX_OFFLOAD_TCP_CKSUM | \
DEV_TX_OFFLOAD_UDP_CKSUM | \
DEV_TX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_QINQ_STRIP | \
- DEV_RX_OFFLOAD_TIMESTAMP)
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
+ DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_VLAN_FILTER | \
+ DEV_RX_OFFLOAD_QINQ_STRIP | \
+ DEV_RX_OFFLOAD_TIMESTAMP | \
+ DEV_RX_OFFLOAD_RSS_HASH)
#define NIX_DEFAULT_RSS_CTX_GROUP 0
#define NIX_DEFAULT_RSS_MCAM_IDX -1
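octeontx2 is the one driver in this series where the flag is a real toggle rather than forced on: NIX_RX_OFFLOAD_RSS_F, and with it the hash write into the mbuf, is selected only when the application requests both RSS queue mode and the new offload. A minimal configuration sketch under that assumption (port_id and the queue counts are illustrative):

    struct rte_eth_conf conf = {
        .rxmode = {
            .mq_mode = ETH_MQ_RX_RSS,
            .offloads = DEV_RX_OFFLOAD_RSS_HASH,
        },
    };

    /* one Rx and one Tx queue, purely for illustration */
    int ret = rte_eth_dev_configure(port_id, 1, 1, &conf);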
PMD_INIT_FUNC_TRACE(edev);
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
/* We need to have min 1 RX queue. There is no min check in
* rte_eth_dev_configure(), so we are checking it here.
*/
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_VLAN_STRIP);
+ DEV_RX_OFFLOAD_VLAN_STRIP |
+ DEV_RX_OFFLOAD_RSS_HASH);
dev_info->rx_queue_offload_capa = 0;
/* TX offloads are on a per-packet basis, so it is applicable
},
.features = SFC_DP_RX_FEAT_FLOW_FLAG |
SFC_DP_RX_FEAT_FLOW_MARK,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH,
.queue_offload_capa = 0,
.get_dev_info = sfc_ef10_essb_rx_get_dev_info,
.pool_ops_supported = sfc_ef10_essb_rx_pool_ops_supported,
.features = SFC_DP_RX_FEAT_MULTI_PROCESS |
SFC_DP_RX_FEAT_INTR,
.dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM,
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH,
.queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.get_dev_info = sfc_ef10_rx_get_dev_info,
.qsize_up_rings = sfc_ef10_rx_qsize_up_rings,
.hw_fw_caps = 0,
},
.features = SFC_DP_RX_FEAT_INTR,
- .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM,
+ .dev_offload_capa = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH,
.queue_offload_capa = DEV_RX_OFFLOAD_SCATTER,
.qsize_up_rings = sfc_efx_rx_qsize_up_rings,
.qcreate = sfc_efx_rx_qcreate,
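In the sfc datapath descriptors above, DEV_RX_OFFLOAD_RSS_HASH lands in dev_offload_capa rather than queue_offload_capa, i.e. it is a per-port offload. A hedged sketch of the distinction as ethdev enforces it:

    /* Port-wide request at configure time: accepted. */
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;

    /* A per-queue request at rte_eth_rx_queue_setup() would be
     * rejected, since the flag is absent from rx_queue_offload_capa:
     * rx_conf.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
     */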
rxmode->offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
}
+ if ((offloads_supported & DEV_RX_OFFLOAD_RSS_HASH) &&
+ (~rxmode->offloads & DEV_RX_OFFLOAD_RSS_HASH))
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
return rc;
}
PMD_INIT_FUNC_TRACE();
+ rxmode->offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
if (!rte_eal_has_hugepages()) {
PMD_INIT_LOG(INFO, "Huge page is not configured");
return -EINVAL;
DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_SCATTER)
+ DEV_RX_OFFLOAD_SCATTER | \
+ DEV_RX_OFFLOAD_RSS_HASH)
#define NICVF_DEFAULT_RX_FREE_THRESH 224
#define NICVF_DEFAULT_TX_FREE_THRESH 224
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME)
+ DEV_RX_OFFLOAD_JUMBO_FRAME | \
+ DEV_RX_OFFLOAD_RSS_HASH)
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
+
if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");