Removing 'DEV_RX_OFFLOAD_JUMBO_FRAME' offload flag.
Instead of drivers announcing this capability, applications can deduce
the capability by checking the reported 'dev_info.max_mtu' or
'dev_info.max_rx_pktlen'.
And instead of the application setting this flag explicitly to enable
jumbo frames, the driver can deduce it by comparing the requested 'mtu'
to 'RTE_ETHER_MTU'.
Removing this additional configuration for simplification.
Suggested-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Reviewed-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
Acked-by: Michal Krawczyk <mk@semihalf.com>
port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN;
- if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
t->internal_port = 1;
RTE_ETH_FOREACH_DEV(i) {
return;
}
- update_jumbo_frame_offload(port_id, res->value);
+ update_mtu_from_frame_size(port_id, res->value);
}
init_port_config();
void
port_mtu_set(portid_t port_id, uint16_t mtu)
{
+ struct rte_port *port = &ports[port_id];
int diag;
- struct rte_port *rte_port = &ports[port_id];
- struct rte_eth_dev_info dev_info;
- int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
- ret = eth_dev_info_get_print_err(port_id, &dev_info);
- if (ret != 0)
- return;
-
- if (mtu > dev_info.max_mtu || mtu < dev_info.min_mtu) {
- fprintf(stderr,
- "Set MTU failed. MTU:%u is not in valid range, min:%u - max:%u\n",
- mtu, dev_info.min_mtu, dev_info.max_mtu);
- return;
- }
diag = rte_eth_dev_set_mtu(port_id, mtu);
if (diag != 0) {
fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
return;
}
- rte_port->dev_conf.rxmode.mtu = mtu;
-
- if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (mtu > RTE_ETHER_MTU)
- rte_port->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- rte_port->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
+ port->dev_conf.rxmode.mtu = mtu;
}
/* Generic flow management functions. */
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- ret = update_jumbo_frame_offload(pid, 0);
- if (ret != 0)
- fprintf(stderr,
- "Updating jumbo frame offload failed for port %u\n",
- pid);
-
if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
}
/*
- * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
- * MTU is also aligned.
+ * Helper function to set MTU from frame size
*
* port->dev_info should be set before calling this function.
*
- * if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
- * ETH_OVERHEAD". This is useful to update flags but not MTU value.
- *
* return 0 on success, negative on error
*/
int
-update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
+update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
- uint64_t rx_offloads;
uint16_t mtu, new_mtu;
- bool on;
eth_overhead = get_eth_overhead(&port->dev_info);
return -1;
}
- if (max_rx_pktlen == 0)
- max_rx_pktlen = mtu + eth_overhead;
-
- rx_offloads = port->dev_conf.rxmode.offloads;
new_mtu = max_rx_pktlen - eth_overhead;
- if (new_mtu <= RTE_ETHER_MTU) {
- rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = false;
- } else {
- if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- fprintf(stderr,
- "Frame size (%u) is not supported by port %u\n",
- max_rx_pktlen, portid);
- return -1;
- }
- rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- on = true;
- }
-
- if (rx_offloads != port->dev_conf.rxmode.offloads) {
- uint16_t qid;
-
- port->dev_conf.rxmode.offloads = rx_offloads;
-
- /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
- for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
- if (on)
- port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
- }
-
if (mtu == new_mtu)
return 0;
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
-int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
+int update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen);
/*
* Work-around of a compilation error with ICC on invocations of the
* Identify if port Speed and Duplex is matching to desired values with
``rte_eth_link_get``.
- * Check ``DEV_RX_OFFLOAD_JUMBO_FRAME`` is set with ``rte_eth_dev_info_get``.
-
* Check promiscuous mode if the drops do not occur for unique MAC address
with ``rte_eth_promiscuous_get``.
 DEV_RX_OFFLOAD_VLAN_STRIP
 DEV_RX_OFFLOAD_KEEP_CRC
-  DEV_RX_OFFLOAD_JUMBO_FRAME
 DEV_RX_OFFLOAD_IPV4_CKSUM
 DEV_RX_OFFLOAD_UDP_CKSUM
 DEV_RX_OFFLOAD_TCP_CKSUM
Supports Rx jumbo frames.
-* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
- ``dev_conf.rxmode.mtu``.
+* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``dev_conf.rxmode.mtu``.
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
* **[related] API**: ``rte_eth_dev_set_mtu()``.
| DEV_RX_OFFLOAD_IPV4_CKSUM \
| DEV_RX_OFFLOAD_UDP_CKSUM \
| DEV_RX_OFFLOAD_TCP_CKSUM \
- | DEV_RX_OFFLOAD_JUMBO_FRAME \
| DEV_RX_OFFLOAD_MACSEC_STRIP \
| DEV_RX_OFFLOAD_VLAN_FILTER)
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC;
dev_info->max_rx_pktlen = BNX2X_MAX_RX_PKT_LEN;
dev_info->max_mac_addrs = BNX2X_MAX_MAC_ADDRS;
dev_info->speed_capa = ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G;
- dev_info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
dev_info->rx_desc_lim.nb_max = MAX_RX_AVAIL;
dev_info->rx_desc_lim.nb_min = MIN_RX_SIZE_NONTPA;
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_VLAN_EXTEND | \
DEV_RX_OFFLOAD_TCP_LRO | \
unsigned int i, j;
int rc;
- if (bp->eth_dev->data->mtu > RTE_ETHER_MTU) {
- bp->eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (bp->eth_dev->data->mtu > RTE_ETHER_MTU)
bp->flags |= BNXT_FLAG_JUMBO;
- } else {
- bp->eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
bp->flags &= ~BNXT_FLAG_JUMBO;
- }
/* THOR does not support ring groups.
* But we will use the array to save RSS context IDs.
if (eth_dev->data->dev_conf.rxmode.offloads &
~(DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
slave_eth_dev->data->dev_conf.rxmode.mtu =
bonded_eth_dev->data->dev_conf.rxmode.mtu;
- if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME)
- slave_eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- slave_eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
nb_rx_queues = bonded_eth_dev->data->nb_rx_queues;
nb_tx_queues = bonded_eth_dev->data->nb_tx_queues;
#define CNXK_NIX_RX_OFFLOAD_CAPA \
(DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
- DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_TIMESTAMP | \
- DEV_RX_OFFLOAD_VLAN_STRIP | DEV_RX_OFFLOAD_SECURITY)
+ DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | DEV_RX_OFFLOAD_RSS_HASH | \
+ DEV_RX_OFFLOAD_TIMESTAMP | DEV_RX_OFFLOAD_VLAN_STRIP | \
+ DEV_RX_OFFLOAD_SECURITY)
#define RSS_IPV4_ENABLE \
(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_UDP | \
{DEV_RX_OFFLOAD_HEADER_SPLIT, " Header Split,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN Filter,"},
{DEV_RX_OFFLOAD_VLAN_EXTEND, " VLAN Extend,"},
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo Frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_SECURITY, " Security,"},
DEV_RX_OFFLOAD_IPV4_CKSUM | \
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
rxq->rspq.size = temp_nb_desc;
rxq->fl.size = temp_nb_desc;
- /* Set to jumbo mode if necessary */
- if (eth_dev->data->mtu > RTE_ETHER_MTU)
- eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, NULL,
is_pf4(adapter) ?
struct rte_mbuf *buf_bulk[n];
int ret, i;
struct rte_pktmbuf_pool_private *mbp_priv;
- u8 jumbo_en = rxq->rspq.eth_dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME;
/* Use jumbo mtu buffers if mbuf data room size can fit jumbo data. */
mbp_priv = rte_mempool_get_priv(rxq->rspq.mb_pool);
- if (jumbo_en &&
- ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000))
+ if ((mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) >= 9000)
buf_size_idx = RX_LARGE_MTU_BUF;
ret = rte_mempool_get_bulk(rxq->rspq.mb_pool, (void *)buf_bulk, n);
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
/* Rx offloads which cannot be disabled */
uint64_t flags;
const char *output;
} rx_offload_map[] = {
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"},
{DEV_RX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"},
{DEV_RX_OFFLOAD_UDP_CKSUM, " UDP csum,"},
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TIMESTAMP;
/* Rx offloads which cannot be disabled */
{DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"},
{DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"},
{DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"},
- {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"},
{DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"},
{DEV_RX_OFFLOAD_RSS_HASH, " RSS,"},
{DEV_RX_OFFLOAD_SCATTER, " Scattered,"}
void em_dev_clear_queues(struct rte_eth_dev *dev);
void em_dev_free_queues(struct rte_eth_dev *dev);
-uint64_t em_get_rx_port_offloads_capa(struct rte_eth_dev *dev);
-uint64_t em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev);
+uint64_t em_get_rx_port_offloads_capa(void);
+uint64_t em_get_rx_queue_offloads_capa(void);
int eth_em_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
dev_info->max_rx_queues = 1;
dev_info->max_tx_queues = 1;
- dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa(dev);
- dev_info->rx_offload_capa = em_get_rx_port_offloads_capa(dev) |
+ dev_info->rx_queue_offload_capa = em_get_rx_queue_offloads_capa();
+ dev_info->rx_offload_capa = em_get_rx_port_offloads_capa() |
dev_info->rx_queue_offload_capa;
dev_info->tx_queue_offload_capa = em_get_tx_queue_offloads_capa(dev);
dev_info->tx_offload_capa = em_get_tx_port_offloads_capa(dev) |
}
uint64_t
-em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
+em_get_rx_port_offloads_capa(void)
{
uint64_t rx_offload_capa;
- uint32_t max_rx_pktlen;
-
- max_rx_pktlen = em_get_max_pktlen(dev);
rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER;
- if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
- rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
return rx_offload_capa;
}
uint64_t
-em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
+em_get_rx_queue_offloads_capa(void)
{
uint64_t rx_queue_offload_capa;
* capability be same to per port queue offloading capability
* for better convenience.
*/
- rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
+ rx_queue_offload_capa = em_get_rx_port_offloads_capa();
return rx_queue_offload_capa;
}
* to avoid splitting packets that don't fit into
* one buffer.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
+ if (dev->data->mtu > RTE_ETHER_MTU ||
rctl_bsize < RTE_ETHER_MAX_LEN) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
if ((hw->mac.type == e1000_ich9lan ||
hw->mac.type == e1000_pch2lan ||
hw->mac.type == e1000_ich10lan) &&
- rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ dev->data->mtu > RTE_ETHER_MTU) {
u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
}
if (hw->mac.type == e1000_pch2lan) {
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (dev->data->mtu > RTE_ETHER_MTU)
e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
else
e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
/*
* Configure support of jumbo frames, if any.
*/
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= E1000_RCTL_LPE;
else
rctl &= ~E1000_RCTL_LPE;
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
* Configure support of jumbo frames, if any.
*/
max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /* Enable long-packet reception (LPE) only for jumbo MTUs; a numeric
+  * comparison is required here — a bitwise AND against RTE_ETHER_MTU
+  * (1500) is nonzero for most MTU values, including the standard 1500,
+  * and would wrongly enable jumbo mode. */
+ if (dev->data->mtu > RTE_ETHER_MTU) {
rctl |= E1000_RCTL_LPE;
/*
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM;
- rx_feat |= DEV_RX_OFFLOAD_JUMBO_FRAME;
tx_feat |= DEV_TX_OFFLOAD_MULTI_SEGS;
/* Inform framework about available features */
(DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME);
+ DEV_RX_OFFLOAD_KEEP_CRC);
return 0;
}
DEV_TX_OFFLOAD_TCP_TSO;
enic->rx_offload_capa =
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_HEADER_SPLIT |
DEV_RX_OFFLOAD_RSS_HASH);
}
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_RSS_HASH;
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_TCP_LRO);
info->tx_offload_capa = (DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
rxq->max_pkt_len =
RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
data->mtu + I40E_ETH_OVERHEAD);
- if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /* Jumbo is indicated by MTU exceeding RTE_ETHER_MTU; must be a
+  * relational compare, not a bitwise AND (1500 & 1500 != 0 would
+  * misclassify the standard MTU as jumbo). */
+ if (dev->data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= IAVF_ETH_MAX_LEN ||
max_pkt_len > IAVF_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;
/* Check if the jumbo frame and maximum packet length are set
* correctly.
*/
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_data->mtu > RTE_ETHER_MTU) {
if (max_pkt_len <= ICE_ETH_MAX_LEN ||
max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->tx_offload_capa =
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_RSS_HASH;
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_VLAN_STRIP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_VLAN_FILTER;
struct ice_rlan_ctx rx_ctx;
enum ice_status err;
uint16_t buf_size;
- struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
struct ice_adapter *ad = rxq->vsi->adapter;
RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
frame_size);
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_data->mtu > RTE_ETHER_MTU) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_SCTP_CKSUM | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_KEEP_CRC | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* Configure support of jumbo frames, if any. */
- if ((offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
+ /* Set LPE only when MTU is above the standard Ethernet MTU; use '>'
+  * rather than '&' — bitwise AND with RTE_ETHER_MTU is nonzero for
+  * nearly all MTU values and would enable jumbo unconditionally. */
+ if (dev->data->mtu > RTE_ETHER_MTU)
rctl |= IGC_RCTL_LPE;
else
rctl &= ~IGC_RCTL_LPE;
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
DEV_RX_OFFLOAD_VLAN_EXTEND |
- DEV_RX_OFFLOAD_VLAN_FILTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ DEV_RX_OFFLOAD_VLAN_FILTER;
dev_info->tx_queue_offload_capa = DEV_TX_OFFLOAD_MBUF_FAST_FREE;
dev_info->tx_offload_capa =
uint16_t queue_idx, uint16_t tx_rate)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct rte_eth_rxmode *rxmode;
uint32_t rf_dec, rf_int;
uint32_t bcnrc_val;
uint16_t link_speed = dev->data->dev_link.link_speed;
bcnrc_val = 0;
}
- rxmode = &dev->data->dev_conf.rxmode;
/*
* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
* register. MMW_SIZE=0x014 if 9728-byte jumbo is supported, otherwise
* set as 0x4.
*/
- if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
- (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+ if (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE)
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
else
IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);
IXGBE_MHADD_MFS_MASK) >> IXGBE_MHADD_MFS_SHIFT;
if (max_frs < max_frame) {
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (max_frame > IXGBE_ETH_MAX_LEN) {
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (max_frame > IXGBE_ETH_MAX_LEN)
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
- } else {
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
- }
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
max_frs = max_frame << IXGBE_MHADD_MFS_SHIFT;
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ /* JUMBOEN must track whether the configured MTU exceeds the standard
+  * Ethernet MTU. A relational compare is required: the bitwise form
+  * '(mtu & RTE_ETHER_MTU) != 0' is true for most MTUs (including the
+  * default 1500) and would leave jumbo mode stuck on. */
+ if (dev->data->mtu > RTE_ETHER_MTU) {
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
{
uint64_t offloads = DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH;
if (priv->hw_csum)
struct mlx5_dev_config *config = &priv->config;
uint64_t offloads = (DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_RSS_HASH);
if (!config->mprq.enabled)
#define MRVL_NETA_MRU_TO_MTU(mru) ((mru) - MRVL_NETA_HDRS_LEN)
/** Rx offloads capabilities */
-#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_JUMBO_FRAME | \
- DEV_RX_OFFLOAD_CHECKSUM)
+#define MVNETA_RX_OFFLOADS (DEV_RX_OFFLOAD_CHECKSUM)
/** Tx offloads capabilities */
#define MVNETA_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_CHECKSUM)
/** Port Tx offloads capabilities */
ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
}
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- hw->mtu = dev->data->mtu;
+ hw->mtu = dev->data->mtu;
if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
.nb_mtu_seg_max = NFP_TX_MAX_MTU_SEG,
};
- /* All NFP devices support jumbo frames */
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_RSS_HASH;
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_VLAN_FILTER)
#define OCTEONTX_TX_OFFLOADS ( \
DEV_RX_OFFLOAD_SCTP_CKSUM | \
DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | \
DEV_RX_OFFLOAD_SCATTER | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_OUTER_UDP_CKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
DEV_RX_OFFLOAD_VLAN_FILTER | \
devinfo->min_rx_bufsize = OTX_EP_MIN_RX_BUF_SIZE;
devinfo->max_rx_pktlen = OTX_EP_MAX_PKT_SZ;
- devinfo->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
- devinfo->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
+ devinfo->rx_offload_capa = DEV_RX_OFFLOAD_SCATTER;
devinfo->tx_offload_capa = DEV_TX_OFFLOAD_MULTI_SEGS;
devinfo->max_mac_addrs = OTX_EP_MAX_MAC_ADDRS;
droq_pkt->l3_len = hdr_lens.l3_len;
droq_pkt->l4_len = hdr_lens.l4_len;
- if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
- !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
- rte_pktmbuf_free(droq_pkt);
- goto oq_read_fail;
- }
-
if (droq_pkt->nb_segs > 1 &&
!(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
rte_pktmbuf_free(droq_pkt);
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_RSS_HASH);
{
uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
- caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-
return caps & sfc_rx_get_offload_mask(sa);
}
#define NICVF_RX_OFFLOAD_CAPA ( \
DEV_RX_OFFLOAD_CHECKSUM | \
DEV_RX_OFFLOAD_VLAN_STRIP | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_SCATTER | \
DEV_RX_OFFLOAD_RSS_HASH)
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_KEEP_CRC |
- DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_RSS_HASH |
DEV_RX_OFFLOAD_SCATTER;
host_features = VIRTIO_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
- dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
DEV_RX_OFFLOAD_UDP_CKSUM | \
DEV_RX_OFFLOAD_TCP_CKSUM | \
DEV_RX_OFFLOAD_TCP_LRO | \
- DEV_RX_OFFLOAD_JUMBO_FRAME | \
DEV_RX_OFFLOAD_RSS_HASH)
int vmxnet3_segs_dynfield_offset = -1;
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_SCATTER |
- DEV_RX_OFFLOAD_JUMBO_FRAME),
+ DEV_RX_OFFLOAD_SCATTER),
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
- .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
- DEV_RX_OFFLOAD_JUMBO_FRAME),
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
.rx_adv_conf = {
.rss_conf = {
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- if (mtu_size > RTE_ETHER_MTU)
- local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
.mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
- .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
}
memcpy(&conf, &port_conf, sizeof(conf));
- /* Set new MTU */
- if (new_mtu > RTE_ETHER_MTU)
- conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
conf.rxmode.mtu = new_mtu;
ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
- if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
return 0;
}
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
- if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
return 0;
}
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
- if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
return 0;
}
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
- if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
return 0;
}
dev_info->max_mtu);
conf->rxmode.mtu = max_pkt_len - overhead_len;
- if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ if (conf->rxmode.mtu > RTE_ETHER_MTU)
conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
return 0;
}
return -1;
}
mergeable = !!ret;
- if (ret) {
- vmdq_conf_default.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (ret)
vmdq_conf_default.rxmode.mtu = MAX_MTU;
- }
break;
case OPT_STATS_NUM:
RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
- RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
goto rollback;
}
- if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
- dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
- /* Use default value */
- dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
- }
-
dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
/*
int ret;
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
- int is_jumbo_frame_capable = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
frame_size = mtu + overhead_len;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
-
- if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
- is_jumbo_frame_capable = 1;
}
- if (mtu > RTE_ETHER_MTU && is_jumbo_frame_capable == 0)
- return -EINVAL;
-
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
- if (ret == 0) {
+ if (ret == 0)
dev->data->mtu = mtu;
- /* switch to jumbo mode if needed */
- if (mtu > RTE_ETHER_MTU)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
-
return eth_err(port_id, ret);
}
#define DEV_RX_OFFLOAD_HEADER_SPLIT 0x00000100
#define DEV_RX_OFFLOAD_VLAN_FILTER 0x00000200
#define DEV_RX_OFFLOAD_VLAN_EXTEND 0x00000400
-#define DEV_RX_OFFLOAD_JUMBO_FRAME 0x00000800
#define DEV_RX_OFFLOAD_SCATTER 0x00002000
/**
* Timestamp is set by the driver in RTE_MBUF_DYNFIELD_TIMESTAMP_NAME