.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
struct rte_eth_rxmode rx_mode = {
.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
};
struct rte_eth_txmode tx_mode = {
port->need_reconfig_queues = 0;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
- port->tx_conf[qi].txq_flags =
- ETH_TXQ_FLAGS_IGNORE;
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_UDP_CKSUM,DEV_RX_OFFLOAD_TCP_CKSUM``.
* **[uses] rte_eth_txconf,rte_eth_txmode**: ``offloads:DEV_TX_OFFLOAD_UDP_CKSUM,DEV_TX_OFFLOAD_TCP_CKSUM,DEV_TX_OFFLOAD_SCTP_CKSUM``.
-* **[uses] user config**: ``dev_conf.rxmode.hw_ip_checksum``.
* **[uses] mbuf**: ``mbuf.ol_flags:PKT_TX_IPV4`` | ``PKT_TX_IPV6``,
``mbuf.ol_flags:PKT_TX_L4_NO_CKSUM`` | ``PKT_TX_TCP_CKSUM`` |
``PKT_TX_SCTP_CKSUM`` | ``PKT_TX_UDP_CKSUM``.
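A minimal sketch of how an application requests these Tx checksum offloads through the mbuf fields listed above (assuming ``m`` is a prepared ``rte_mbuf`` carrying an IPv4/TCP packet and the matching ``DEV_TX_OFFLOAD_*`` flags were enabled in ``txmode.offloads``; header struct names are the pre-19.x ones used throughout this patch):

    /* Describe the headers so the PMD can locate the L4 checksum field. */
    m->l2_len = sizeof(struct ether_hdr);
    m->l3_len = sizeof(struct ipv4_hdr);
    /* Request hardware TCP checksum computation via the listed mbuf flags. */
    m->ol_flags |= PKT_TX_IPV4 | PKT_TX_TCP_CKSUM;
    /* Depending on the PMD, the TCP header checksum field may need to be
     * pre-filled with the pseudo-header checksum (rte_ipv4_phdr_cksum()). */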
is the one which hasn't been enabled in ``rte_eth_dev_configure()`` and is requested to be enabled
in ``rte_eth_[rt]x_queue_setup()``. It must be a per-queue type of offload; otherwise queue setup fails and an error is logged (see the sketch below).
-For an application to use the Tx offloads API it should set the ``ETH_TXQ_FLAGS_IGNORE`` flag in the ``txq_flags`` field located in ``rte_eth_txconf`` struct.
-In such cases it is not required to set other flags in ``txq_flags``.
-For an application to use the Rx offloads API it should set the ``ignore_offload_bitfield`` bit in the ``rte_eth_rxmode`` struct.
-In such cases it is not required to set other bitfield offloads in the ``rxmode`` struct.
-
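With the legacy ``txq_flags`` and rxmode bit-field paths removed, the whole flow is expressed through the ``offloads`` fields only. A minimal sketch (``port_id``, ``nb_rxd``, ``nb_txd``, ``socket_id`` and ``mb_pool`` are placeholders assumed to be defined by the application):

    struct rte_eth_dev_info dev_info;
    struct rte_eth_conf port_conf = { .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN } };
    struct rte_eth_rxconf rxq_conf;
    struct rte_eth_txconf txq_conf;

    rte_eth_dev_info_get(port_id, &dev_info);

    /* Per-port offloads: keep the request within the device capabilities. */
    port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM & dev_info.rx_offload_capa;
    port_conf.txmode.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE & dev_info.tx_offload_capa;
    if (rte_eth_dev_configure(port_id, 1, 1, &port_conf) < 0)
            return -1;

    /* Per-queue offloads: anything requested here that was not enabled in
     * rte_eth_dev_configure() must be a per-queue capable offload, otherwise
     * queue setup fails and logs an error as described above. */
    rxq_conf = dev_info.default_rxconf;
    rxq_conf.offloads = port_conf.rxmode.offloads;
    if (rte_eth_rx_queue_setup(port_id, 0, nb_rxd, socket_id, &rxq_conf, mb_pool) < 0)
            return -1;

    txq_conf = dev_info.default_txconf;
    txq_conf.offloads = port_conf.txmode.offloads;
    if (rte_eth_tx_queue_setup(port_id, 0, nb_txd, socket_id, &txq_conf) < 0)
            return -1;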
Poll Mode Driver API
--------------------
experimental API ``rte_pktmbuf_attach_extbuf()`` is used. Removing the macro
fixes this semantic inconsistency.
-* ethdev: a new Tx and Rx offload API was introduced on 17.11.
- In the new API, offloads are divided into per-port and per-queue offloads.
- Offloads are disabled by default and enabled per application request.
-
- In later releases the old offloading API will be deprecated, which will include:
- - removal of ``ETH_TXQ_FLAGS_NO*`` flags.
- - removal of ``txq_flags`` field from ``rte_eth_txconf`` struct.
- - removal of the offloads bit-field from ``rte_eth_rxmode`` struct.
-
* ethdev: In v18.11 the ``DEV_RX_OFFLOAD_CRC_STRIP`` offload flag will be removed, and the
default behavior without any flag will be to strip the CRC.
To keep the CRC, the ``DEV_RX_OFFLOAD_KEEP_CRC`` flag will be required.
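Until that change, an application that needs the CRC kept can already probe for the new flag. A hedged sketch (``port_conf`` is the application's ``rte_eth_conf``; whether the flag is honoured depends on the PMD advertising it in its capabilities):

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_KEEP_CRC)
            port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
    else
            port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;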
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
dev_info->default_txconf = (struct rte_eth_txconf) {
.tx_free_thresh = AXGBE_TX_FREE_THRESH,
- .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
- ETH_TXQ_FLAGS_NOOFFLOADS,
};
}
if (txq->nb_desc % txq->free_thresh != 0)
txq->vector_disable = 1;
- if ((tx_conf->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOOFFLOADS) !=
- ETH_TXQ_FLAGS_NOOFFLOADS) {
+ if (tx_conf->offloads != 0)
txq->vector_disable = 1;
- }
/* Allocate TX ring hardware descriptors */
tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
/*
* Check if VLAN present only.
* Do not check whether L3/L4 rx checksum done by NIC or not,
- * That can be found from rte_eth_rxmode.hw_ip_checksum flag
+ * That can be found in the rte_eth_rxmode.offloads field
*/
pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0;
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
/* update max frame size */
dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
}
}
if (max_rx_pkt_len > ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.jumbo_frame = 1;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev->data->dev_conf.rxmode.jumbo_frame = 0;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (!dev->data->dev_started && restart) {
qede_dev_start(dev);
unsigned int max_fill_level;
/** Minimum number of unused Tx descriptors to do reap */
unsigned int free_thresh;
- /** Transmit queue configuration flags */
- unsigned int flags;
/** Offloads enabled on the transmit queue */
uint64_t offloads;
/** Tx queue size */
}
/*
- * The driver does not use it, but other PMDs update jumbo_frame
+ * The driver does not use it, but other PMDs update jumbo frame
* flag and max_rx_pkt_len when MTU is set.
*/
if (mtu > ETHER_MAX_LEN) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
-
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- rxmode->jumbo_frame = 1;
}
dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
memset(qinfo, 0, sizeof(*qinfo));
- qinfo->conf.txq_flags = txq_info->txq->flags;
qinfo->conf.offloads = txq_info->txq->offloads;
qinfo->conf.tx_free_thresh = txq_info->txq->free_thresh;
qinfo->conf.tx_deferred_start = txq_info->deferred_start;
if (rte_eth_dev_must_keep_crc(rxmode->offloads)) {
sfc_warn(sa, "FCS stripping cannot be disabled - always on");
rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- rxmode->hw_strip_crc = 1;
}
return rc;
txq->free_thresh =
(tx_conf->tx_free_thresh) ? tx_conf->tx_free_thresh :
SFC_TX_DEFAULT_FREE_THRESH;
- txq->flags = tx_conf->txq_flags;
txq->offloads = offloads;
rc = sfc_dma_alloc(sa, "txq", sw_index, EFX_TXQ_SIZE(txq_info->entries),
memset(&info, 0, sizeof(info));
info.max_fill_level = txq_max_fill_level;
info.free_thresh = txq->free_thresh;
- info.flags = tx_conf->txq_flags;
info.offloads = offloads;
info.txq_entries = txq_info->entries;
info.dma_desc_size_max = encp->enc_tx_dma_desc_size_max;
if (rc != 0)
goto fail_ev_qstart;
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application which expects that IPv4 checksum offload is enabled
- * all the time as there is no legacy flag to turn off the offload.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) ||
- (~txq->flags & ETH_TXQ_FLAGS_IGNORE))
+ if (txq->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_IPV4;
- if ((txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (offloads_supported & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)))
+ if (txq->offloads & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
flags |= EFX_TXQ_CKSUM_INNER_IPV4;
if ((txq->offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
flags |= EFX_TXQ_CKSUM_INNER_TCPUDP;
}
- /*
- * The absence of ETH_TXQ_FLAGS_IGNORE is associated with a legacy
- * application. In turn, the absence of ETH_TXQ_FLAGS_NOXSUMTCP is
- * associated specifically with a legacy application which expects
- * both TCP checksum offload and TSO to be enabled because the legacy
- * API does not provide a dedicated mechanism to control TSO.
- */
- if ((txq->offloads & DEV_TX_OFFLOAD_TCP_TSO) ||
- ((~txq->flags & ETH_TXQ_FLAGS_IGNORE) &&
- (~txq->flags & ETH_TXQ_FLAGS_NOXSUMTCP)))
+ if (txq->offloads & DEV_TX_OFFLOAD_TCP_TSO)
flags |= EFX_TXQ_FATSOV2;
rc = efx_tx_qcreate(sa->nic, sw_index, 0, &txq->mem,
struct sfc_dp_txq *dp;
efx_txq_t *common;
unsigned int free_thresh;
- unsigned int flags;
uint64_t offloads;
};
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
- dev_info->default_txconf = (struct rte_eth_txconf) {
- .txq_flags = ETH_TXQ_FLAGS_NOOFFLOADS
- };
host_features = VTPCI_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
dev_info->speed_capa = ETH_LINK_SPEED_10G;
dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;
- dev_info->default_txconf.txq_flags = ETH_TXQ_FLAGS_NOXSUMSCTP;
dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;
dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.rx_adv_conf = {
/* TX setup */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid), &txq_conf);
/* TX setup */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(BOND_PORT, 0, nb_txd,
rte_eth_dev_socket_id(BOND_PORT), &txq_conf);
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < txRings; q++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
memset(&cfg_port, 0, sizeof(cfg_port));
cfg_port.txmode.mq_mode = ETH_MQ_TX_NONE;
- cfg_port.rxmode.ignore_offload_bitfield = 1;
for (idx_port = 0; idx_port < cnt_ports; idx_port++) {
struct app_port *ptr_port = &app_cfg->ports[idx_port];
"rte_eth_rx_queue_setup failed"
);
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
if (rte_eth_tx_queue_setup(
idx_port, 0, nb_txd,
rte_eth_dev_socket_id(idx_port), &txconf) < 0)
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf_default.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.rxmode = {
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
port, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port),
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socket, txconf);
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
printf("Setup txq=%u,%d,%d\n", lcore_id, tx_queueid, socket_id);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, tx_queueid, nb_txd,
.rxmode = {
.max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_CRC_STRIP),
},
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
rte_lcore_to_socket_id(lcore_id), txconf);
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.rxmode = {
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
"port%u (%d)\n", (unsigned)port, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port), &txq_conf);
.mq_mode = ETH_MQ_RX_NONE,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue on each port */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
rte_eth_dev_info_get(portid, &dev_info);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
socketid, txconf);
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_CHECKSUM),
},
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue logical core on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
/* Init TX queues */
if (app.nic_tx_port_mask[port] == 1) {
static const struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* init one TX queue on each port */
fflush(stdout);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, 0, nb_txd,
rte_eth_dev_socket_id(portid),
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
}
txq_conf = info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q ++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
struct rte_eth_conf eth_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
rxq_conf = dev_info.default_rxconf;
rxq_conf.offloads = conf->eth_conf->rxmode.offloads;
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf->eth_conf->txmode.offloads;
for (i = 0; i < conf->nr_tx_rings; i++) {
ret = rte_eth_tx_queue_setup(portid, i, tx_slots,
static struct rte_mempool *mbuf_pool;
-static struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .ignore_offload_bitfield = 1,
- },
-};
+static struct rte_eth_conf port_conf_default;
struct worker_thread_args {
struct rte_ring *ring_in;
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < txRings; q++) {
ret = rte_eth_tx_queue_setup(port_id, q, nb_txd,
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
fflush(stdout);
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
socketid, txconf);
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
- /* Setup txq_flags */
struct rte_eth_txconf *txconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = port_conf.txmode.offloads;
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_CRC_STRIP),
},
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_rx),
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, nb_txd,
rte_eth_dev_socket_id(port_tx),
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
tx_conf.tx_free_thresh = 0;
tx_conf.tx_rs_thresh = 0;
tx_conf.tx_deferred_start = 0;
- tx_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
/* init port */
RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
/* Initialize the port's TX queue */
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = local_port_conf.txmode.offloads;
ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd,
rte_eth_dev_socket_id(port_id),
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port, q, nb_txd,
struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .ignore_offload_bitfield = 1,
},
};
const uint16_t rx_rings = 1, tx_rings = num_nodes;
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port_num, q, tx_ring_size,
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
}
txconf = dev_info.default_txconf;
- txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
if (!rte_eth_dev_is_valid_port(port))
return -1;
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
/*
* VLAN strip is necessary for 1G NIC such as I350,
* this fixes bug of ipv4 forwarding in guest can't
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
rxconf->rx_drop_en = 1;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
/*configure the number of supported virtio devices based on VMDQ limits */
num_devices = dev_info.max_vmdq_pools;
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.max_rx_pkt_len = ETHER_MAX_LEN,
- .ignore_offload_bitfield = 1,
},
};
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
/* Allocate and set up 1 TX queue per Ethernet port. */
for (q = 0; q < tx_rings; q++) {
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_ONLY,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
},
.txmode = {
rxconf = &dev_info.default_rxconf;
rxconf->rx_drop_en = 1;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
txconf->offloads = port_conf.txmode.offloads;
for (q = 0; q < rxRings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rxRingSize,
.rxmode = {
.mq_mode = ETH_MQ_RX_VMDQ_DCB,
.split_hdr_size = 0,
- .ignore_offload_bitfield = 1,
},
.txmode = {
.mq_mode = ETH_MQ_TX_VMDQ_DCB,
}
txq_conf = dev_info.default_txconf;
- txq_conf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
txq_conf.offloads = port_conf.txmode.offloads;
for (q = 0; q < num_queues; q++) {
retval = rte_eth_tx_queue_setup(port, q, txRingSize,
}
}
-/**
- * A conversion function from rxmode bitfield API.
- */
-static void
-rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
- uint64_t *rx_offloads)
-{
- uint64_t offloads = 0;
-
- if (rxmode->header_split == 1)
- offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
- if (rxmode->hw_ip_checksum == 1)
- offloads |= DEV_RX_OFFLOAD_CHECKSUM;
- if (rxmode->hw_vlan_filter == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
- if (rxmode->hw_vlan_strip == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
- if (rxmode->hw_vlan_extend == 1)
- offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
- if (rxmode->jumbo_frame == 1)
- offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- if (rxmode->hw_strip_crc == 1)
- offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
- if (rxmode->enable_scatter == 1)
- offloads |= DEV_RX_OFFLOAD_SCATTER;
- if (rxmode->enable_lro == 1)
- offloads |= DEV_RX_OFFLOAD_TCP_LRO;
- if (rxmode->hw_timestamp == 1)
- offloads |= DEV_RX_OFFLOAD_TIMESTAMP;
- if (rxmode->security == 1)
- offloads |= DEV_RX_OFFLOAD_SECURITY;
-
- *rx_offloads = offloads;
-}
-
const char * __rte_experimental
rte_eth_dev_rx_offload_name(uint64_t offload)
{
return -EBUSY;
}
- /*
- * Convert between the offloads API to enable PMDs to support
- * only one of them.
- */
- if (dev_conf->rxmode.ignore_offload_bitfield == 0)
- rte_eth_convert_rx_offload_bitfield(
- &dev_conf->rxmode, &local_conf.rxmode.offloads);
-
/* Copy the dev_conf parameter into the dev structure */
memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
rx_conf = &dev_info.default_rxconf;
local_conf = *rx_conf;
- if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
- /**
- * Reflect port offloads to queue offloads in order for
- * offloads to not be discarded.
- */
- rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
- &local_conf.offloads);
- }
/*
* If an offloading has already been enabled in
return eth_err(port_id, ret);
}
-/**
- * Convert from tx offloads to txq_flags.
- */
-static void
-rte_eth_convert_tx_offload(const uint64_t tx_offloads, uint32_t *txq_flags)
-{
- uint32_t flags = 0;
-
- if (!(tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS))
- flags |= ETH_TXQ_FLAGS_NOMULTSEGS;
- if (!(tx_offloads & DEV_TX_OFFLOAD_VLAN_INSERT))
- flags |= ETH_TXQ_FLAGS_NOVLANOFFL;
- if (!(tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMSCTP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMUDP;
- if (!(tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM))
- flags |= ETH_TXQ_FLAGS_NOXSUMTCP;
- if (tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
- flags |= ETH_TXQ_FLAGS_NOREFCOUNT | ETH_TXQ_FLAGS_NOMULTMEMP;
-
- *txq_flags = flags;
-}
-
-/**
- * A conversion function from txq_flags API.
- */
-static void
-rte_eth_convert_txq_flags(const uint32_t txq_flags, uint64_t *tx_offloads)
-{
- uint64_t offloads = 0;
-
- if (!(txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS))
- offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOVLANOFFL))
- offloads |= DEV_TX_OFFLOAD_VLAN_INSERT;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP))
- offloads |= DEV_TX_OFFLOAD_SCTP_CKSUM;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMUDP))
- offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
- if (!(txq_flags & ETH_TXQ_FLAGS_NOXSUMTCP))
- offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
- if ((txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT) &&
- (txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP))
- offloads |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
-
- *tx_offloads = offloads;
-}
-
int
rte_eth_tx_queue_setup(uint16_t port_id, uint16_t tx_queue_id,
uint16_t nb_tx_desc, unsigned int socket_id,
if (tx_conf == NULL)
tx_conf = &dev_info.default_txconf;
- /*
- * Convert between the offloads API to enable PMDs to support
- * only one of them.
- */
local_conf = *tx_conf;
- if (!(tx_conf->txq_flags & ETH_TXQ_FLAGS_IGNORE)) {
- rte_eth_convert_txq_flags(tx_conf->txq_flags,
- &local_conf.offloads);
- }
/*
* If an offloading has already been enabled in
rte_eth_dev_info_get(uint16_t port_id, struct rte_eth_dev_info *dev_info)
{
struct rte_eth_dev *dev;
- struct rte_eth_txconf *txconf;
const struct rte_eth_desc_lim lim = {
.nb_max = UINT16_MAX,
.nb_min = 0,
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
dev_info->dev_flags = &dev->data->dev_flags;
- txconf = &dev_info->default_txconf;
- /* convert offload to txq_flags to support legacy app */
- rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
}
int
struct rte_eth_txq_info *qinfo)
{
struct rte_eth_dev *dev;
- struct rte_eth_txconf *txconf = &qinfo->conf;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
memset(qinfo, 0, sizeof(*qinfo));
dev->dev_ops->txq_info_get(dev, queue_id, qinfo);
- /* convert offload to txq_flags to support legacy app */
- rte_eth_convert_tx_offload(txconf->offloads, &txconf->txq_flags);
return 0;
}
struct rte_eth_rxmode {
/** The multi-queue packet distribution mode to be used, e.g. RSS. */
enum rte_eth_rx_mq_mode mq_mode;
- uint32_t max_rx_pkt_len; /**< Only used if jumbo_frame enabled. */
+ uint32_t max_rx_pkt_len; /**< Only used if JUMBO_FRAME enabled. */
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
/**
* Per-port Rx offloads to be set using DEV_RX_OFFLOAD_* flags.
* structure are allowed to be set.
*/
uint64_t offloads;
- __extension__
- /**
- * Below bitfield API is obsolete. Application should
- * enable per-port offloads using the offload field
- * above.
- */
- uint16_t header_split : 1, /**< Header Split enable. */
- hw_ip_checksum : 1, /**< IP/UDP/TCP checksum offload enable. */
- hw_vlan_filter : 1, /**< VLAN filter enable. */
- hw_vlan_strip : 1, /**< VLAN strip enable. */
- hw_vlan_extend : 1, /**< Extended VLAN enable. */
- jumbo_frame : 1, /**< Jumbo Frame Receipt enable. */
- hw_strip_crc : 1, /**< Enable CRC stripping by hardware. */
- enable_scatter : 1, /**< Enable scatter packets rx handler */
- enable_lro : 1, /**< Enable LRO */
- hw_timestamp : 1, /**< Enable HW timestamp */
- security : 1, /**< Enable rte_security offloads */
- /**
- * When set the offload bitfield should be ignored.
- * Instead per-port Rx offloads should be set on offloads
- * field above.
- * Per-queue offloads shuold be set on rte_eth_rxq_conf
- * structure.
- * This bit is temporary till rxmode bitfield offloads API will
- * be deprecated.
- */
- ignore_offload_bitfield : 1;
};
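/*
 * Illustrative sketch (consistent with the examples converted in this patch):
 * with the legacy bit-field removed, the structure is typically filled with
 * designated initializers and all Rx offloads go through the offloads field:
 *
 *     struct rte_eth_rxmode rxmode = {
 *             .mq_mode = ETH_MQ_RX_RSS,
 *             .max_rx_pkt_len = ETHER_MAX_LEN,
 *             .split_hdr_size = 0,
 *             .offloads = DEV_RX_OFFLOAD_CHECKSUM | DEV_RX_OFFLOAD_CRC_STRIP,
 *     };
 */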
/**
uint64_t offloads;
};
-#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
-#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
-#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
-#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
-#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
-#define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< disable UDP checksum offload */
-#define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< disable TCP checksum offload */
-#define ETH_TXQ_FLAGS_NOOFFLOADS \
- (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
- ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
-#define ETH_TXQ_FLAGS_NOXSUMS \
- (ETH_TXQ_FLAGS_NOXSUMSCTP | ETH_TXQ_FLAGS_NOXSUMUDP | \
- ETH_TXQ_FLAGS_NOXSUMTCP)
-/**
- * When set the txq_flags should be ignored,
- * instead per-queue Tx offloads will be set on offloads field
- * located on rte_eth_txq_conf struct.
- * This flag is temporary till the rte_eth_txq_conf.txq_flags
- * API will be deprecated.
- */
-#define ETH_TXQ_FLAGS_IGNORE 0x8000
-
/**
* A structure used to configure a TX ring of an Ethernet port.
*/
uint16_t tx_free_thresh; /**< Start freeing TX buffers if there are
less free descriptors than this value. */
- uint32_t txq_flags; /**< Set flags for the Tx queue */
uint8_t tx_deferred_start; /**< Do not start queue with rte_eth_dev_start(). */
/**
* Per-queue Tx offloads to be set using DEV_TX_OFFLOAD_* flags.
* The *tx_rs_thresh* value should be less than or equal to the
* *tx_free_thresh* value, and both of them should be less than
* *nb_tx_desc* - 3.
- * - The *txq_flags* member contains flags to pass to the TX queue setup
- * function to configure the behavior of the TX queue. This should be set
- * to 0 if no special configuration is required.
- * This API is obsolete and will be deprecated. Applications
- * should set it to ETH_TXQ_FLAGS_IGNORE and use
- * the offloads field below.
* - The *offloads* member contains Tx offloads to be enabled.
* If an offloading set in tx_conf->offloads
* hasn't been set in the input argument eth_conf->txmode.offloads
*
* If the PMD is DEV_TX_OFFLOAD_MT_LOCKFREE capable, multiple threads can
* invoke this function concurrently on the same tx queue without SW lock.
- * @see rte_eth_dev_info_get, struct rte_eth_txconf::txq_flags
+ * @see rte_eth_dev_info_get, struct rte_eth_txconf::offloads
*
* @see rte_eth_tx_prepare to perform some prior checks or adjustments
* for offloads.