struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
return -EINVAL;
}
- port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
- if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
+ port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
+ if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
t->internal_port = 1;
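The arithmetic above is the frame-size-to-MTU conversion that recurs throughout this series; a minimal standalone sketch (RTE_ETHER_HDR_LEN is 14 and RTE_ETHER_CRC_LEN is 4, so a 1518-byte frame maps back to RTE_ETHER_MTU, 1500):

/* Hypothetical helper, not part of the patch: convert a maximum
 * wire frame length to the MTU the ethdev API now expects.
 */
static inline uint32_t
frame_len_to_mtu(uint32_t frame_len)
{
	return frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
}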
__rte_unused void *data)
{
struct cmd_config_max_pkt_len_result *res = parsed_result;
- uint32_t max_rx_pkt_len_backup = 0;
- portid_t pid;
+ portid_t port_id;
int ret;
+ if (strcmp(res->name, "max-pkt-len") != 0) {
+ printf("Unknown parameter\n");
+ return;
+ }
+
if (!all_ports_stopped()) {
fprintf(stderr, "Please stop all ports first\n");
return;
}
- RTE_ETH_FOREACH_DEV(pid) {
- struct rte_port *port = &ports[pid];
+ RTE_ETH_FOREACH_DEV(port_id) {
+ struct rte_port *port = &ports[port_id];
- if (!strcmp(res->name, "max-pkt-len")) {
- if (res->value < RTE_ETHER_MIN_LEN) {
- fprintf(stderr,
- "max-pkt-len can not be less than %d\n",
- RTE_ETHER_MIN_LEN);
- return;
- }
- if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
- return;
-
- ret = eth_dev_info_get_print_err(pid, &port->dev_info);
- if (ret != 0) {
- fprintf(stderr,
- "rte_eth_dev_info_get() failed for port %u\n",
- pid);
- return;
- }
-
- max_rx_pkt_len_backup = port->dev_conf.rxmode.max_rx_pkt_len;
+ if (res->value < RTE_ETHER_MIN_LEN) {
+ fprintf(stderr,
+ "max-pkt-len can not be less than %d\n",
+ RTE_ETHER_MIN_LEN);
+ return;
+ }
- port->dev_conf.rxmode.max_rx_pkt_len = res->value;
- if (update_jumbo_frame_offload(pid) != 0)
- port->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len_backup;
- } else {
- fprintf(stderr, "Unknown parameter\n");
+ ret = eth_dev_info_get_print_err(port_id, &port->dev_info);
+ if (ret != 0) {
+ fprintf(stderr,
+ "rte_eth_dev_info_get() failed for port %u\n",
+ port_id);
return;
}
+
+ update_jumbo_frame_offload(port_id, res->value);
}
init_port_config();
int diag;
struct rte_port *rte_port = &ports[port_id];
struct rte_eth_dev_info dev_info;
- uint16_t eth_overhead;
int ret;
if (port_id_is_invalid(port_id, ENABLED_WARN))
return;
}
diag = rte_eth_dev_set_mtu(port_id, mtu);
- if (diag)
+ if (diag != 0) {
fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
- else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- /*
- * Ether overhead in driver is equal to the difference of
- * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
- * device supports jumbo frame.
- */
- eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
- if (mtu > RTE_ETHER_MTU) {
+ return;
+ }
+
+ rte_port->dev_conf.rxmode.mtu = mtu;
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (mtu > RTE_ETHER_MTU)
rte_port->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
- rte_port->dev_conf.rxmode.max_rx_pkt_len =
- mtu + eth_overhead;
- } else
+ else
rte_port->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
}
if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
n = atoi(optarg);
if (n >= RTE_ETHER_MIN_LEN)
- rx_mode.max_rx_pkt_len = (uint32_t) n;
+ max_rx_pkt_len = n;
else
rte_exit(EXIT_FAILURE,
"Invalid max-pkt-len=%d - should be > %d\n",
*/
uint8_t f_quit;
+/*
+ * Max Rx frame size, set by '--max-pkt-len' parameter.
+ */
+uint32_t max_rx_pkt_len;
+
/*
* Configuration of packet segments used to scatter received packets
* if some of split features is configured.
/*
* Ethernet device configuration.
*/
-struct rte_eth_rxmode rx_mode = {
- /* Default maximum frame length.
- * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
- * in init_config().
- */
- .max_rx_pkt_len = 0,
-};
+struct rte_eth_rxmode rx_mode;
struct rte_eth_txmode tx_mode = {
.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
return 0;
}
+static uint32_t
+get_eth_overhead(struct rte_eth_dev_info *dev_info)
+{
+ uint32_t eth_overhead;
+
+ if (dev_info->max_mtu != UINT16_MAX &&
+ dev_info->max_rx_pktlen > dev_info->max_mtu)
+ eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
+ else
+ eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return eth_overhead;
+}
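A short usage sketch for the helper above (illustrative values, not from the patch): the returned overhead converts a desired wire frame length into the MTU to program, whether or not the PMD reports its own overhead through max_rx_pktlen/max_mtu:

/* Hypothetical example: derive the MTU for a 9018-byte frame. */
uint16_t port_id = 0;              /* illustrative port */
struct rte_eth_dev_info dev_info;

if (rte_eth_dev_info_get(port_id, &dev_info) == 0) {
	uint32_t mtu = 9018 - get_eth_overhead(&dev_info);
	/* With the 18-byte fallback overhead this yields 9000. */
}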
+
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
struct rte_port *port = &ports[pid];
- uint16_t data_size;
int ret;
int i;
if (ret != 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
- ret = update_jumbo_frame_offload(pid);
+ ret = update_jumbo_frame_offload(pid, 0);
if (ret != 0)
fprintf(stderr,
"Updating jumbo frame offload failed for port %u\n",
if (eth_link_speed)
port->dev_conf.link_speeds = eth_link_speed;
+ if (max_rx_pkt_len)
+ port->dev_conf.rxmode.mtu = max_rx_pkt_len -
+ get_eth_overhead(&port->dev_info);
+
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
*/
if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
- data_size = rx_mode.max_rx_pkt_len /
- port->dev_info.rx_desc_lim.nb_mtu_seg_max;
-
- if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
- mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
- TESTPMD_LOG(WARNING,
- "Configured mbuf size of the first segment %hu\n",
- mbuf_data_size[0]);
+ uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
+ uint16_t mtu;
+
+ if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
+ uint16_t data_size = (mtu + eth_overhead) /
+ port->dev_info.rx_desc_lim.nb_mtu_seg_max;
+ uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
+
+ if (buffer_size > mbuf_data_size[0]) {
+ mbuf_data_size[0] = buffer_size;
+ TESTPMD_LOG(WARNING,
+ "Configured mbuf size of the first segment %hu\n",
+ mbuf_data_size[0]);
+ }
}
}
}
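A worked instance of the sizing above, assuming the default RTE_PKTMBUF_HEADROOM of 128: with an MTU of 9000, an 18-byte Ethernet overhead and nb_mtu_seg_max = 5, data_size = (9000 + 18) / 5 = 1803, so the first-segment mbuf data room must hold at least 1803 + 128 = 1931 bytes.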
pi);
return -1;
}
+
/* configure port */
diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
nb_txq + nb_hairpinq,
/*
* Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
- * MTU is also aligned if JUMBO_FRAME offload is not set.
+ * MTU is also aligned.
*
* port->dev_info should be set before calling this function.
*
+ * If 'max_rx_pktlen' is zero, it is set to the current device value,
+ * "MTU + ETH_OVERHEAD". This is useful to update the offload flags
+ * without changing the MTU value.
+ *
* return 0 on success, negative on error
*/
int
-update_jumbo_frame_offload(portid_t portid)
+update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
{
struct rte_port *port = &ports[portid];
uint32_t eth_overhead;
uint64_t rx_offloads;
- int ret;
+ uint16_t mtu, new_mtu;
bool on;
- /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
- if (port->dev_info.max_mtu != UINT16_MAX &&
- port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
- eth_overhead = port->dev_info.max_rx_pktlen -
- port->dev_info.max_mtu;
- else
- eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ eth_overhead = get_eth_overhead(&port->dev_info);
- rx_offloads = port->dev_conf.rxmode.offloads;
+ if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
+ printf("Failed to get MTU for port %u\n", portid);
+ return -1;
+ }
- /* Default config value is 0 to use PMD specific overhead */
- if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
- port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
+ if (max_rx_pktlen == 0)
+ max_rx_pktlen = mtu + eth_overhead;
+
+ rx_offloads = port->dev_conf.rxmode.offloads;
+ new_mtu = max_rx_pktlen - eth_overhead;
- if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
+ if (new_mtu <= RTE_ETHER_MTU) {
rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
on = false;
} else {
if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
fprintf(stderr,
"Frame size (%u) is not supported by port %u\n",
- port->dev_conf.rxmode.max_rx_pkt_len,
- portid);
+ max_rx_pktlen, portid);
return -1;
}
rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
}
- /* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
- * if unset do it here
- */
- if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- ret = eth_dev_set_mtu_mp(portid,
- port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
- if (ret)
- fprintf(stderr,
- "Failed to set MTU to %u for port %u\n",
- port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
- portid);
+ if (mtu == new_mtu)
+ return 0;
+
+ if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
+ fprintf(stderr,
+ "Failed to set MTU to %u for port %u\n",
+ new_mtu, portid);
+ return -1;
}
+ port->dev_conf.rxmode.mtu = new_mtu;
+
return 0;
}
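A hedged usage sketch for the new signature (the port number is illustrative): passing zero keeps the current MTU and only refreshes the JUMBO_FRAME flag, while a non-zero wire frame length reprograms the MTU as frame length minus overhead:

/* Refresh the offload flag from the device's current MTU. */
if (update_jumbo_frame_offload(0, 0) != 0)
	fprintf(stderr, "offload refresh failed for port 0\n");

/* Request a 9018-byte maximum frame; MTU becomes 9018 - eth_overhead. */
if (update_jumbo_frame_offload(0, 9018) != 0)
	fprintf(stderr, "jumbo frame config failed for port 0\n");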
extern struct rte_fdir_conf fdir_conf;
+extern uint32_t max_rx_pkt_len;
+
/*
* Configuration of packet segments used to scatter received packets
* if some of split features is configured.
__rte_unused void *user_param);
void add_tx_dynf_callback(portid_t portid);
void remove_tx_dynf_callback(portid_t portid);
-int update_jumbo_frame_offload(portid_t portid);
+int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
/*
* Work-around of a compilation error with ICC on invocations of the
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
.split_hdr_size = 0,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf default_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf rss_pmd_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
~~~~~~~~~~~~~~~~~~~~~
The DPAA SoC family supports a maximum jumbo frame size of 10240 bytes. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
up to 10240 bytes can still reach the host interface.
~~~~~~~~~~~~~~~~~~~~~
The DPAA2 SoC family supports a maximum jumbo frame size of 10240 bytes. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
up to 10240 bytes can still reach the host interface.
Supports Rx jumbo frames.
* **[uses] rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
- ``dev_conf.rxmode.max_rx_pkt_len``.
+ ``dev_conf.rxmode.mtu``.
* **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
* **[related] API**: ``rte_eth_dev_set_mtu()``.
~~~~~~~~~~~~~~~~~~~~~
The FM10000 family of NICs supports a maximum jumbo frame size of 15K. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
member of ``struct rte_eth_conf`` is set to a value lower than 15364, frames
up to 15364 bytes can still reach the host interface.
and each stride receives one packet. MPRQ can improve throughput for
small-packet traffic.
- When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
+ When MPRQ is enabled, the MTU can be larger than the size of the
user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
- configure large stride size enough to accommodate max_rx_pkt_len as long as
+ configure a stride size large enough to accommodate the MTU as long as the
device allows. Note that this can waste system memory compared to enabling Rx
scatter and multi-segment packet.
~~~~~~~~~~~~~~~~~~~~~
The OCTEON TX SoC family NICs support a maximum jumbo frame size of 32K. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
up to 32k bytes can still reach the host interface.
~~~~~~~~~~~~~~~~~~~~~
The ThunderX SoC family NICs support a maximum jumbo frame size of 9K. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
member of ``struct rte_eth_conf`` is set to a value lower than 9200, frames
up to 9200 bytes can still reach the host interface.
In 19.11 PMDs will still update the field even when the offload is not
enabled.
-* ethdev: ``uint32_t max_rx_pkt_len`` field of ``struct rte_eth_rxmode``, will be
- replaced by a new ``uint32_t mtu`` field of ``struct rte_eth_conf`` in v21.11.
- The new ``mtu`` field will be used to configure the initial device MTU via
- ``rte_eth_dev_configure()`` API.
- Later MTU can be changed by ``rte_eth_dev_set_mtu()`` API as done now.
- The existing ``(struct rte_eth_dev)->data->mtu`` variable will be used to store
- the configured ``mtu`` value,
- and this new ``(struct rte_eth_dev)->data->dev_conf.mtu`` variable will
- be used to store the user configuration request.
- Unlike ``max_rx_pkt_len``, which was valid only when ``JUMBO_FRAME`` enabled,
- ``mtu`` field will be always valid.
- When ``mtu`` config is not provided by the application, default ``RTE_ETHER_MTU``
- value will be used.
- ``(struct rte_eth_dev)->data->mtu`` should be updated after MTU set successfully,
- either by ``rte_eth_dev_configure()`` or ``rte_eth_dev_set_mtu()``.
-
- An application may need to configure device for a specific Rx packet size, like for
- cases ``DEV_RX_OFFLOAD_SCATTER`` is not supported and device received packet size
- can't be bigger than Rx buffer size.
- To cover these cases an application needs to know the device packet overhead to be
- able to calculate the ``mtu`` corresponding to a Rx buffer size, for this
- ``(struct rte_eth_dev_info).max_rx_pktlen`` will be kept,
- the device packet overhead can be calculated as:
- ``(struct rte_eth_dev_info).max_rx_pktlen - (struct rte_eth_dev_info).max_mtu``
-
* ethdev: Announce moving from dedicated modify function for each field,
to using the general ``rte_flow_modify_field`` action.
:end-before: >8 End of initializing a given port.
The Ethernet ports are configured with default settings using the
-``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
- :language: c
- :start-after: Ethernet ports configured with default settings using struct. 8<
- :end-before: >8 End of configuration of Ethernet ports.
+``rte_eth_dev_configure()`` function.
For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
[--lookup LOOKUP_METHOD]
--config(port,queue,lcore)[,(port,queue,lcore)]
[--eth-dest=X,MM:MM:MM:MM:MM:MM]
- [--enable-jumbo [--max-pkt-len PKTLEN]]
+ [--max-pkt-len PKTLEN]
[--no-numa]
[--hash-entry-num]
[--ipv6]
* ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
-* ``--enable-jumbo:`` Optional, enables jumbo frames.
-
-* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
+* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
* ``--no-numa:`` Optional, disables numa awareness.
.. code-block:: console
- ./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
+ ./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--max-pkt-len PKTLEN] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
where,
* --alg=<val>: optional, ACL classify method to use, one of:
``scalar|sse|avx2|neon|altivec|avx512x16|avx512x32``
-* --enable-jumbo: optional, enables jumbo frames
-
* --max-pkt-len: optional, maximum packet length in decimal (64-9600)
* --no-numa: optional, disables numa awareness
[-P]
--config(port,queue,lcore)[,(port,queue,lcore)]
[--eth-dest=X,MM:MM:MM:MM:MM:MM]
- [--enable-jumbo [--max-pkt-len PKTLEN]]
+ [--max-pkt-len PKTLEN]
[--no-numa]
[--per-port-pool]
* ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
-* ``--enable-jumbo:`` Optional, enables jumbo frames.
-
-* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
+* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
* ``--no-numa:`` Optional, disables numa awareness.
.. code-block:: console
- ./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
+ ./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] [--max-pkt-len PKTLEN] [--no-numa]
where,
* --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues from which ports are mapped to which cores.
-* --enable-jumbo: optional, enables jumbo frames
-
* --max-pkt-len: optional, maximum packet length in decimal (64-9600)
* --no-numa: optional, disables numa awareness
-p PORTMASK [-P]
--rx(port,queue,lcore,thread)[,(port,queue,lcore,thread)]
--tx(lcore,thread)[,(lcore,thread)]
- [--enable-jumbo] [--max-pkt-len PKTLEN]] [--no-numa]
+ [--max-pkt-len PKTLEN] [--no-numa]
[--hash-entry-num] [--ipv6] [--no-lthreads] [--stat-lcore lcore]
[--parse-ptype]
the lcore the thread runs on, and the id of RX thread with which it is
associated. The parameters are explained below.
-* ``--enable-jumbo``: optional, enables jumbo frames.
-
* ``--max-pkt-len``: optional, maximum packet length in decimal (64-9600).
* ``--no-numa``: optional, disables numa awareness.
:end-before: >8 End of main functional part of port initialization.
The Ethernet ports are configured with default settings using the
-``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct:
-
-.. literalinclude:: ../../../examples/skeleton/basicfwd.c
- :language: c
- :start-after: Configuration of ethernet ports. 8<
- :end-before: >8 End of configuration of ethernet ports.
+``rte_eth_dev_configure()`` function.
For this example the ports are set up with 1 RX and 1 TX queue using the
``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
return -EINVAL;
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
return 0;
}
avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
struct avp_dev *avp)
{
- unsigned int max_rx_pkt_len;
+ unsigned int max_rx_pktlen;
- max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
- if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
- (max_rx_pkt_len > avp->host_mbuf_size)) {
+ if (max_rx_pktlen > avp->guest_mbuf_size ||
+ max_rx_pktlen > avp->host_mbuf_size) {
/*
* If the guest MTU is greater than either the host or guest
* buffers then chained mbufs have to be enabled in the TX
* direction. It is assumed that the application will not need
- * to send packets larger than their max_rx_pkt_len (MRU).
+ * to send packets larger than their MTU.
*/
return 1;
}
PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
avp->max_rx_pkt_len,
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
avp->host_mbuf_size,
avp->guest_mbuf_size);
* function; send it truncated to avoid the performance
* hit of having to manage returning the already
* allocated buffer to the free list. This should not
- * happen since the application should have set the
- * max_rx_pkt_len based on its MTU and it should be
+ * happen since the application should not send
+ * packets larger than its MTU and it should be
* policing its own packet sizes.
*/
txq->errors++;
struct axgbe_port *pdata = dev->data->dev_private;
int ret;
struct rte_eth_dev_data *dev_data = dev->data;
- uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+ uint16_t max_pkt_len;
dev->dev_ops = &axgbe_eth_dev_ops;
rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
+
+ max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
max_pkt_len > pdata->rx_buf_size)
dev_data->scattered_rx = 1;
dev->data->port_id);
return -EBUSY;
}
- if (frame_size > AXGBE_ETH_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
val = 1;
val = 0;
}
AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return 0;
}
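The same substitution recurs in the driver hunks that follow: the jumbo decision now keys off the generic boundary mtu > RTE_ETHER_MTU (1500) instead of per-PMD frame-length constants such as AXGBE_ETH_MAX_LEN, and the per-driver writes to rxmode.max_rx_pkt_len are dropped because the ethdev layer tracks the configured value in dev->data->mtu.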
bnx2x_dev_configure(struct rte_eth_dev *dev)
{
struct bnx2x_softc *sc = dev->data->dev_private;
- struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
PMD_INIT_FUNC_TRACE(sc);
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
- dev->data->mtu = sc->mtu;
- }
+ sc->mtu = dev->data->dev_conf.rxmode.mtu;
if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
- BNXT_NUM_VLANS;
- bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
- }
+ bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+
return 0;
resource_error:
*/
static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
{
+ uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
uint16_t buf_size;
int i;
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
RTE_PKTMBUF_HEADROOM);
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
+ if (eth_dev->data->mtu + overhead > buf_size)
return 1;
}
return 0;
int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
+ uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
struct bnxt *bp = eth_dev->data->dev_private;
uint32_t new_pkt_size;
uint32_t rc = 0;
if (!eth_dev->data->nb_rx_queues)
return rc;
- new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
- VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+ new_pkt_size = new_mtu + overhead;
/*
* Disallow any MTU change that would require scattered receive support
}
/* Is there a change in mtu setting? */
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
+ if (eth_dev->data->mtu == new_mtu)
return rc;
for (i = 0; i < bp->nr_vnics; i++) {
}
}
- if (!rc)
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
-
if (bnxt_hwrm_config_host_mtu(bp))
PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
slave_eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_VLAN_FILTER;
- slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ slave_eth_dev->data->dev_conf.rxmode.mtu =
+ bonded_eth_dev->data->dev_conf.rxmode.mtu;
if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_JUMBO_FRAME)
mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
}
{
struct rte_eth_dev_data *data = eth_dev->data;
struct cnxk_eth_rxq_sp *rxq;
- uint16_t mtu;
int rc;
rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
/* Setup scatter mode if needed by jumbo */
nix_enable_mseg_on_jumbo(rxq);
- /* Setup MTU based on max_rx_pkt_len */
- mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
- CNXK_NIX_MAX_VTAG_ACT_SIZE;
-
- rc = cnxk_nix_mtu_set(eth_dev, mtu);
+ rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
if (rc)
plt_err("Failed to set default MTU size, rc=%d", rc);
goto exit;
}
- frame_size += RTE_ETHER_CRC_LEN;
-
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- /* Update max_rx_pkt_len */
- data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
exit:
return rc;
}
return err;
/* Must accommodate at least RTE_ETHER_MIN_MTU */
- if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
+ if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
return -EINVAL;
/* set to jumbo mode if needed */
- if (new_mtu > CXGBE_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-1, -1, true);
- if (!err)
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
-
return err;
}
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
- unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct rte_eth_dev_info dev_info;
rxq->fl.size = temp_nb_desc;
/* Set to jumbo mode if necessary */
- if (pkt_len > CXGBE_ETH_MAX_LEN)
+ if (eth_dev->data->mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
unsigned int mtu;
int ret;
- mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+ mtu = pi->eth_dev->data->mtu;
conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
u32 wr_mid;
u64 cntrl, *end;
bool v6;
- u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+ u32 max_pkt_len;
/* Reject xmit if queue is stopped */
if (unlikely(txq->flags & EQ_STOPPED))
return 0;
}
+ max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
(unlikely(m->pkt_len > max_pkt_len)))
goto out_free;
return -EINVAL;
}
- if (frame_size > DPAA_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
fman_if_set_maxfrm(dev->process_private, frame_size);
return 0;
struct fman_if *fif = dev->process_private;
struct __fman_if *__fif;
struct rte_intr_handle *intr_handle;
+ uint32_t max_rx_pktlen;
int speed, duplex;
int ret;
tx_offloads, dev_tx_offloads_nodis);
}
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- uint32_t max_len;
-
- DPAA_PMD_DEBUG("enabling jumbo");
-
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- DPAA_MAX_RX_PKT_LEN)
- max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
- else {
- DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
- "supported is %d",
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- DPAA_MAX_RX_PKT_LEN);
- max_len = DPAA_MAX_RX_PKT_LEN;
- }
-
- fman_if_set_maxfrm(dev->process_private, max_len);
- dev->data->mtu = max_len
- - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
+ max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
+ DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+ "supported is %d",
+ max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
+ max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
}
+ fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
+
if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
DPAA_PMD_DEBUG("enabling scatter mode");
fman_if_set_sg(dev->process_private, 1);
u32 flags = 0;
int ret;
u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+ uint32_t max_rx_pktlen;
PMD_INIT_FUNC_TRACE();
return -EINVAL;
}
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+ VLAN_TAG_SIZE;
/* Max packet can fit in single buffer */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+ if (max_rx_pktlen <= buffsz) {
;
} else if (dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- buffsz * DPAA_SGT_MAX_ENTRIES) {
- DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+ if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
+ DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
"MaxSGlist %d",
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- buffsz * DPAA_SGT_MAX_ENTRIES);
+ max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
rte_errno = EOVERFLOW;
return -rte_errno;
}
DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- buffsz - RTE_PKTMBUF_HEADROOM);
+ max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
}
dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
dpaa_intf->valid = 1;
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
- fman_if_get_sg_enable(fif),
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ fman_if_get_sg_enable(fif), max_rx_pktlen);
/* checking if push mode only, no error check for now */
if (!rxq->is_static &&
dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
int tx_l3_csum_offload = false;
int tx_l4_csum_offload = false;
int ret, tc_index;
+ uint32_t max_rx_pktlen;
PMD_INIT_FUNC_TRACE();
tx_offloads, dev_tx_offloads_nodis);
}
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
- ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
- priv->token, eth_conf->rxmode.max_rx_pkt_len
- - RTE_ETHER_CRC_LEN);
- if (ret) {
- DPAA2_PMD_ERR(
- "Unable to set mtu. check config");
- return ret;
- }
- dev->data->mtu =
- dev->data->dev_conf.rxmode.max_rx_pkt_len -
- RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
- VLAN_TAG_SIZE;
- DPAA2_PMD_INFO("MTU configured for the device: %d",
- dev->data->mtu);
- } else {
- return -1;
+ max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+ if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
+ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+ priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
+ if (ret != 0) {
+ DPAA2_PMD_ERR("Unable to set mtu. check config");
+ return ret;
}
+ DPAA2_PMD_INFO("MTU configured for the device: %d",
+ dev->data->mtu);
+ } else {
+ return -1;
}
if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
return -EINVAL;
- if (frame_size > DPAA2_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
/* Set the Max Rx frame length as 'mtu' +
* Maximum Ethernet header length
*/
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > E1000_ETH_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
return 0;
}
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD);
}
static void
E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE);
+ E1000_WRITE_REG(hw, E1000_RLPML,
+ dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE);
}
static int
rctl = E1000_READ_REG(hw, E1000_RCTL);
/* switch to jumbo mode if needed */
- if (frame_size > E1000_ETH_MAX_LEN) {
+ if (mtu > RTE_ETHER_MTU) {
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= E1000_RCTL_LPE;
}
E1000_WRITE_REG(hw, E1000_RCTL, rctl);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
return 0;
}
uint32_t srrctl;
uint16_t buf_size;
uint16_t rctl_bsize;
+ uint32_t max_len;
uint16_t i;
int ret;
/*
* Configure support of jumbo frames, if any.
*/
+ max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-
rctl |= E1000_RCTL_LPE;
/*
E1000_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE) > buf_size){
+ if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG,
"forcing scatter mode");
uint32_t srrctl;
uint16_t buf_size;
uint16_t rctl_bsize;
+ uint32_t max_len;
uint16_t i;
int ret;
hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* setup MTU */
- e1000_rlpml_set_vf(hw,
- (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE));
+ max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
+ e1000_rlpml_set_vf(hw, (uint16_t)(max_len + VLAN_TAG_SIZE));
/* Configure and enable each RX queue. */
rctl_bsize = 0;
E1000_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE) > buf_size){
+ if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG,
"forcing scatter mode");
return rc;
}
-static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
-{
- uint32_t max_frame_len = adapter->max_mtu;
-
- if (adapter->edev_data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME)
- max_frame_len =
- adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
-
- return max_frame_len;
-}
-
static int ena_check_valid_conf(struct ena_adapter *adapter)
{
- uint32_t max_frame_len = ena_get_mtu_conf(adapter);
+ uint32_t mtu = adapter->edev_data->mtu;
- if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+ if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
PMD_INIT_LOG(ERR,
"Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
- max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+ mtu, adapter->max_mtu, ENA_MIN_MTU);
return ENA_COM_UNSUPPORTED;
}
ena_dev = &adapter->ena_dev;
ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
- if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
+ if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
PMD_DRV_LOG(ERR,
"Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
- mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+ mtu, adapter->max_mtu, ENA_MIN_MTU);
return -EINVAL;
}
dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
- dev_info->max_rx_pktlen = adapter->max_mtu;
+ dev_info->max_rx_pktlen = adapter->max_mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
+ dev_info->min_mtu = ENA_MIN_MTU;
+ dev_info->max_mtu = adapter->max_mtu;
dev_info->max_mac_addrs = 1;
dev_info->max_rx_queues = adapter->max_num_io_queues;
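Reporting min_mtu/max_mtu alongside max_rx_pktlen makes the overhead derivation used earlier in this series well defined for ena; a hedged consumer-side sketch:

/* With the values reported above, an application recovers the
 * fixed L2 overhead (header + CRC) from the info struct:
 */
uint32_t overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
/* == RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN == 18 */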
return -EINVAL;
}
- if (frame_size > ENETC_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads &=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
/*setting the MTU*/
enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint32_t checksum = L3_CKSUM | L4_CKSUM;
+ uint32_t max_len;
PMD_INIT_FUNC_TRACE();
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- uint32_t max_len;
-
- max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-
- enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(max_len));
- enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
- ENETC_MAC_MAXFRM_SIZE);
- enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
- 2 * ENETC_MAC_MAXFRM_SIZE);
- dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
- RTE_ETHER_CRC_LEN;
- }
+ max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
+ enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
+ enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
int config;
* max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
* a hint to the driver to size receive buffers accordingly so that
* larger-than-vnic-mtu packets get truncated. For DPDK, we let
- * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+ * the user decide the buffer size via rxmode.mtu, basically
* ignoring vNIC mtu.
*/
device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
struct rq_enet_desc *rqd = rq->ring.descs;
unsigned i;
dma_addr_t dma_addr;
- uint32_t max_rx_pkt_len;
+ uint32_t max_rx_pktlen;
uint16_t rq_buf_len;
if (!rq->in_use)
/*
* If *not* using scatter and the mbuf size is greater than the
- * requested max packet size (max_rx_pkt_len), then reduce the
- * posted buffer size to max_rx_pkt_len. HW still receives packets
- * larger than max_rx_pkt_len, but they will be truncated, which we
+ * requested max packet size (mtu + eth overhead), then reduce the
+ * posted buffer size to max packet size. HW still receives packets
+ * larger than max packet size, but they will be truncated, which we
* drop in the rx handler. Not ideal, but better than returning
* large packets when the user is not expecting them.
*/
- max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
- if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
- rq_buf_len = max_rx_pkt_len;
+ if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
+ rq_buf_len = max_rx_pktlen;
for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
mb = rte_mbuf_raw_alloc(rq->mp);
if (mb == NULL) {
unsigned int mbuf_size, mbufs_per_pkt;
unsigned int nb_sop_desc, nb_data_desc;
uint16_t min_sop, max_sop, min_data, max_data;
- uint32_t max_rx_pkt_len;
+ uint32_t max_rx_pktlen;
/*
* Representor uses a reserved PF queue. Translate representor
mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
RTE_PKTMBUF_HEADROOM);
- /* max_rx_pkt_len includes the ethernet header and CRC. */
- max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ /* max_rx_pktlen includes the ethernet header and CRC. */
+ max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
if (enic->rte_dev->data->dev_conf.rxmode.offloads &
DEV_RX_OFFLOAD_SCATTER) {
dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
/* ceil((max pkt len)/mbuf_size) */
- mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
+ mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
} else {
dev_info(enic, "Scatter rx mode disabled\n");
mbufs_per_pkt = 1;
- if (max_rx_pkt_len > mbuf_size) {
+ if (max_rx_pktlen > mbuf_size) {
dev_warning(enic, "The maximum Rx packet size (%u) is"
" larger than the mbuf size (%u), and"
" scatter is disabled. Larger packets will"
" be truncated.\n",
- max_rx_pkt_len, mbuf_size);
+ max_rx_pktlen, mbuf_size);
}
}
rq_sop->data_queue_enable = 1;
rq_data->in_use = 1;
/*
- * HW does not directly support rxmode.max_rx_pkt_len. HW always
+ * HW does not directly support the configured MTU. HW always
* receives packet sizes up to the "max" MTU.
* If not using scatter, we can achieve the effect of dropping
* larger packets by reducing the size of posted buffers.
* See enic_alloc_rx_queue_mbufs().
*/
- if (max_rx_pkt_len <
- enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
- dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
- " when scatter rx mode is in use.\n");
+ if (enic->rte_dev->data->mtu < enic->max_mtu) {
+ dev_warning(enic,
+ "mtu is ignored when scatter rx mode is in use.\n");
}
} else {
dev_info(enic, "Rq %u Scatter rx mode not being used\n",
if (mbufs_per_pkt > 1) {
dev_info(enic, "For max packet size %u and mbuf size %u valid"
" rx descriptor range is %u to %u\n",
- max_rx_pkt_len, mbuf_size, min_sop + min_data,
+ max_rx_pktlen, mbuf_size, min_sop + min_data,
max_sop + max_data);
}
dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
"MTU (%u) is greater than value configured in NIC (%u)\n",
new_mtu, config_mtu);
- /* Update the MTU and maximum packet length */
- eth_dev->data->mtu = new_mtu;
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
- enic_mtu_to_max_rx_pktlen(new_mtu);
-
/*
* If the device has not started (enic_enable), nothing to do.
* Later, enic_enable() will set up RQs reflecting the new maximum
FM10K_SRRCTL_LOOPBACK_SUPPRESS);
/* It adds dual VLAN length for supporting dual VLAN */
- if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+ if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
uint32_t reg;
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
/* mtu size is 256~9600 */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
- dev->data->dev_conf.rxmode.max_rx_pkt_len >
- HINIC_MAX_JUMBO_FRAME_SIZE) {
+ if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
+ HINIC_MIN_FRAME_SIZE ||
+ HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) >
+ HINIC_MAX_JUMBO_FRAME_SIZE) {
PMD_DRV_LOG(ERR,
- "Max rx pkt len out of range, get max_rx_pkt_len:%d, "
+ "Packet length out of range, get packet length:%d, "
"expect between %d and %d",
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu),
HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
return -EINVAL;
}
- nic_dev->mtu_size =
- HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ nic_dev->mtu_size = dev->data->dev_conf.rxmode.mtu;
/* rss template */
err = hinic_config_mq_mode(dev, TRUE);
static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
{
struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
- uint32_t frame_size;
int ret = 0;
PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
return ret;
}
- /* update max frame size */
- frame_size = HINIC_MTU_TO_PKTLEN(mtu);
- if (frame_size > HINIC_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
nic_dev->mtu_size = mtu;
return ret;
return 0;
}
-static int
-hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
-{
- struct hns3_adapter *hns = dev->data->dev_private;
- struct hns3_hw *hw = &hns->hw;
- uint32_t max_rx_pkt_len;
- uint16_t mtu;
- int ret;
-
- if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
- return 0;
-
- /*
- * If jumbo frames are enabled, MTU needs to be refreshed
- * according to the maximum RX packet length.
- */
- max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
- if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
- max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
- hns3_err(hw, "maximum Rx packet length must be greater than %u "
- "and no more than %u when jumbo frame enabled.",
- (uint16_t)HNS3_DEFAULT_FRAME_LEN,
- (uint16_t)HNS3_MAX_FRAME_LEN);
- return -EINVAL;
- }
-
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
- ret = hns3_dev_mtu_set(dev, mtu);
- if (ret)
- return ret;
- dev->data->mtu = mtu;
-
- return 0;
-}
-
static int
hns3_setup_dcb(struct rte_eth_dev *dev)
{
goto cfg_err;
}
- ret = hns3_refresh_mtu(dev, conf);
- if (ret)
+ ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
+ if (ret != 0)
goto cfg_err;
ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
}
rte_spinlock_lock(&hw->lock);
- is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
+ is_jumbo_frame = mtu > RTE_ETHER_MTU ? true : false;
frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
/*
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
rte_spinlock_unlock(&hw->lock);
return 0;
uint16_t nb_rx_q = dev->data->nb_rx_queues;
uint16_t nb_tx_q = dev->data->nb_tx_queues;
struct rte_eth_rss_conf rss_conf;
- uint32_t max_rx_pkt_len;
- uint16_t mtu;
bool gro_en;
int ret;
goto cfg_err;
}
- /*
- * If jumbo frames are enabled, MTU needs to be refreshed
- * according to the maximum RX packet length.
- */
- if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
- if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
- max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
- hns3_err(hw, "maximum Rx packet length must be greater "
- "than %u and less than %u when jumbo frame enabled.",
- (uint16_t)HNS3_DEFAULT_FRAME_LEN,
- (uint16_t)HNS3_MAX_FRAME_LEN);
- ret = -EINVAL;
- goto cfg_err;
- }
-
- mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
- ret = hns3vf_dev_mtu_set(dev, mtu);
- if (ret)
- goto cfg_err;
- dev->data->mtu = mtu;
- }
+ ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
+ if (ret != 0)
+ goto cfg_err;
ret = hns3vf_dev_configure_vlan(dev);
if (ret)
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
rte_spinlock_unlock(&hw->lock);
return 0;
uint16_t nb_desc)
{
struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
- struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+ uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
uint16_t min_vec_bds;
/*
* The HNS3 hardware network engine sets scattered mode as default. If the
* driver does not work in scattered mode, packets greater than buf_size
- * but smaller than max_rx_pkt_len will be distributed to multiple BDs.
+ * but smaller than the frame size will be distributed to multiple BDs.
* Driver cannot handle this situation.
*/
- if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
- hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
+ if (!hw->data->scattered_rx && frame_size > buf_size) {
+ hns3_err(hw, "frame size is not allowed to be set greater "
"than rx_buf_len if scattered is off.");
return -EINVAL;
}
}
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
- dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
+ dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
dev->data->scattered_rx = true;
}
return -EBUSY;
}
- if (frame_size > I40E_ETH_MAX_LEN)
- dev_data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (mtu > RTE_ETHER_MTU)
+ dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
return ret;
}
}
rxq->max_pkt_len =
- RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
- rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
+ RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
+ data->mtu + I40E_ETH_OVERHEAD);
if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_dev_data *dev_data = dev->data;
uint16_t buf_size, max_pkt_len;
+ uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
/* Calculate the maximum packet length allowed */
max_pkt_len = RTE_MIN((uint32_t)
rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ frame_size);
/* Check if the jumbo frame and maximum packet length are set
* correctly.
adapter->stopped = 0;
- vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
dev->data->nb_tx_queues);
num_queue_pairs = vf->num_queue_pairs;
return -EBUSY;
}
- if (frame_size > IAVF_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
return ret;
}
buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- max_pkt_len = RTE_MIN((uint32_t)
- ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ dev->data->mtu + ICE_ETH_OVERHEAD);
/* Check if the jumbo frame and maximum packet length are set
* correctly.
pf->adapter_stopped = false;
/* Set the max frame size to default value*/
- max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
- pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+ max_frame_size = pf->dev_data->mtu ?
+ pf->dev_data->mtu + ICE_ETH_OVERHEAD :
ICE_FRAME_SIZE_MAX;
/* Set the max frame size to HW*/
return -EBUSY;
}
- if (frame_size > ICE_ETH_MAX_LEN)
- dev_data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (mtu > RTE_ETHER_MTU)
+ dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
return 0;
}
uint32_t rxdid = ICE_RXDID_COMMS_OVS;
uint32_t regval;
struct ice_adapter *ad = rxq->vsi->adapter;
+ uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
/* Set buffer size as the head split is disabled. */
buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
RTE_PKTMBUF_HEADROOM);
rxq->rx_hdr_len = 0;
rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
- rxq->max_pkt_len = RTE_MIN((uint32_t)
- ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
- dev_data->dev_conf.rxmode.max_rx_pkt_len);
+ rxq->max_pkt_len =
+ RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+ frame_size);
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
return -EINVAL;
}
- buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
- RTE_PKTMBUF_HEADROOM);
-
/* Check if scattered RX needs to be used. */
- if (rxq->max_pkt_len > buf_size)
+ if (frame_size > buf_size)
dev_data->scattered_rx = 1;
rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
#define IGC_INTEL_VENDOR_ID 0x8086
-/*
- * The overhead from MTU to max frame size.
- * Considering VLAN so tag needs to be counted.
- */
-#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
- RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
-
#define IGC_FC_PAUSE_TIME 0x0680
#define IGC_LINK_UPDATE_CHECK_TIMEOUT 90 /* 9s */
#define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
/* switch to jumbo mode if needed */
if (mtu > RTE_ETHER_MTU) {
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl |= IGC_RCTL_LPE;
} else {
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
rctl &= ~IGC_RCTL_LPE;
}
IGC_WRITE_REG(hw, IGC_RCTL, rctl);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
- IGC_WRITE_REG(hw, IGC_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
return 0;
}
igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
uint32_t ctrl_ext;
ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
return 0;
- if ((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
- goto write_ext_vlan;
-
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
- RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+ if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+ frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
return -EINVAL;
}
- dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
- IGC_WRITE_REG(hw, IGC_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
-write_ext_vlan:
IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
return 0;
}
igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
{
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+ uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
uint32_t ctrl_ext;
ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
return 0;
- if ((dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
- goto write_ext_vlan;
-
/* Update maximum packet length */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
+ if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
+ frame_size, MAX_RX_JUMBO_FRAME_SIZE);
return -EINVAL;
}
- dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
- IGC_WRITE_REG(hw, IGC_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
-write_ext_vlan:
IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
return 0;
}
#define IGC_HKEY_REG_SIZE IGC_DEFAULT_REG_SIZE
#define IGC_HKEY_SIZE (IGC_HKEY_REG_SIZE * IGC_HKEY_MAX_INDEX)
+/*
+ * The overhead from MTU to max frame size.
+ * Both VLAN tags are counted to allow for QinQ.
+ */
+#define IGC_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + \
+ RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2)
+
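A quick check of the new constant (VLAN_TAG_SIZE is 4): IGC_ETH_OVERHEAD = 14 + 4 + 2 * 4 = 26 bytes, so the default 1500-byte MTU corresponds to a 1526-byte maximum frame on the wire.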
/*
* TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
* multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
struct igc_rx_queue *rxq;
struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
- uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t max_rx_pktlen;
uint32_t rctl;
uint32_t rxcsum;
uint16_t buf_size;
IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
/* Configure support of jumbo frames, if any. */
- if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if ((offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
rctl |= IGC_RCTL_LPE;
-
- /*
- * Set maximum packet length by default, and might be updated
- * together with enabling/disabling dual VLAN.
- */
- IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
- } else {
+ else
rctl &= ~IGC_RCTL_LPE;
- }
+
+ max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
+ /*
+ * Set maximum packet length by default, and might be updated
+ * together with enabling/disabling dual VLAN.
+ */
+ IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
/* Configure and enable each RX queue. */
rctl_bsize = 0;
IGC_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
+ if (max_rx_pktlen > buf_size)
dev->data->scattered_rx = 1;
} else {
/*
ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
- uint32_t max_frame_size;
int err;
IONIC_PRINT_CALL();
/*
* Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU
- * is done by the the API.
+ * is done by the API.
*/
- /*
- * Max frame size is MTU + Ethernet header + VLAN + QinQ
- * (plus ETHER_CRC_LEN if the adapter is able to keep CRC)
- */
- max_frame_size = mtu + RTE_ETHER_HDR_LEN + 4 + 4;
-
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len < max_frame_size)
- return -EINVAL;
-
err = ionic_lif_change_mtu(lif, mtu);
if (err)
return err;
struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
struct rte_mbuf *rxm, *rxm_seg;
uint32_t max_frame_size =
- rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
uint64_t pkt_flags = 0;
uint32_t pkt_type;
struct ionic_rx_stats *stats = &rxq->stats;
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
- uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
struct ionic_rx_qcq *rxq;
int err;
{
struct ionic_rx_qcq *rxq = rx_queue;
uint32_t frame_size =
- rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
struct ionic_rx_service service_cb_arg;
service_cb_arg.rx_pkts = rx_pkts;
return -EBUSY;
}
- if (frame_size > IPN3KE_ETH_MAX_LEN)
- dev_data->dev_conf.rxmode.offloads |=
- (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
+ if (mtu > RTE_ETHER_MTU)
+ dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
- dev_data->dev_conf.rxmode.offloads &=
- (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
-
- dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (rpst->i40e_pf_eth) {
ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
- struct rte_eth_dev_data *dev_data = dev->data;
int ret;
ret = ixgbe_dev_info_get(dev, &dev_info);
/* If device is started, refuse mtu that requires the support of
* scattered packets when this feature has not been enabled before.
*/
- if (dev_data->dev_started && !dev_data->scattered_rx &&
- (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
- dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ if (dev->data->dev_started && !dev->data->scattered_rx &&
+ frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
}
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
/* switch to jumbo mode if needed */
- if (frame_size > IXGBE_ETH_MAX_LEN) {
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (mtu > RTE_ETHER_MTU) {
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
} else {
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
}
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
- maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+ maxfrs |= (frame_size << 16);
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
return 0;
* set as 0x4.
*/
if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
- (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
- IXGBE_MMW_SIZE_JUMBO_FRAME);
+ (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
else
- IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
- IXGBE_MMW_SIZE_DEFAULT);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);
/* Set RTTBCNRC of queue X */
IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if (mtu < RTE_ETHER_MIN_MTU ||
- max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
+ if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
return -EINVAL;
/* If device is started, refuse mtu that requires the support of
*/
if (dev_data->dev_started && !dev_data->scattered_rx &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
- dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+ dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
PMD_INIT_LOG(ERR, "Stop port first.");
return -EINVAL;
}
if (ixgbevf_rlpml_set_vf(hw, max_frame))
return -EINVAL;
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
return 0;
}
* if PF has jumbo frames enabled which means legacy
* VFs are disabled.
*/
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
- IXGBE_ETH_MAX_LEN)
+ if (dev->data->mtu > RTE_ETHER_MTU)
break;
/* fall through */
default:
* legacy VFs.
*/
if (max_frame > IXGBE_ETH_MAX_LEN ||
- dev->data->dev_conf.rxmode.max_rx_pkt_len >
- IXGBE_ETH_MAX_LEN)
+ dev->data->mtu > RTE_ETHER_MTU)
return -1;
break;
}
uint16_t buf_size;
uint16_t i;
struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+ uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
int rc;
PMD_INIT_FUNC_TRACE();
hlreg0 |= IXGBE_HLREG0_JUMBOEN;
maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
maxfrs &= 0x0000FFFF;
- maxfrs |= (rx_conf->max_rx_pkt_len << 16);
+ maxfrs |= (frame_size << 16);
IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
} else
hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
IXGBE_SRRCTL_BSIZEPKT_SHIFT);
/* It adds dual VLAN length for supporting dual VLAN */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
+ if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
struct ixgbe_hw *hw;
struct ixgbe_rx_queue *rxq;
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+ uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
uint64_t bus_addr;
uint32_t srrctl, psrtype = 0;
uint16_t buf_size;
* ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
* VF packets received can work in all cases.
*/
- if (ixgbevf_rlpml_set_vf(hw,
- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+ if (ixgbevf_rlpml_set_vf(hw, frame_size) != 0) {
PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ frame_size);
return -EINVAL;
}
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (rxmode->max_rx_pkt_len +
- 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+ (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
dev->data->scattered_rx = 1;
{
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
- uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
struct lio_dev_ctrl_cmd ctrl_cmd;
struct lio_ctrl_pkt ctrl_pkt;
return -1;
}
- if (frame_len > LIO_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
eth_dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
eth_dev->data->dev_conf.rxmode.offloads &=
~DEV_RX_OFFLOAD_JUMBO_FRAME;
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
- eth_dev->data->mtu = mtu;
-
return 0;
}
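The lio simplification above depends on a contract established elsewhere in this series: the ethdev layer, not the PMD, records a successfully applied MTU. A simplified sketch of that contract (not the verbatim library code; it assumes the internal ethdev driver structures):

	#include <ethdev_driver.h>	/* internal header; exact name varies by release */

	/* rte_eth_dev_set_mtu() stores the new value in dev->data->mtu only
	 * after the driver callback succeeds, so PMDs such as lio no longer
	 * assign it themselves. */
	static int
	set_mtu_sketch(struct rte_eth_dev *dev, uint16_t mtu)
	{
		int ret;

		ret = dev->dev_ops->mtu_set(dev, mtu);
		if (ret == 0)
			dev->data->mtu = mtu;	/* single point of update */
		return ret;
	}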
static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
- uint16_t mtu;
- uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
struct lio_device *lio_dev = LIO_DEV(eth_dev);
uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
int ret = 0;
goto dev_mtu_set_error;
}
- mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
- if (mtu < RTE_ETHER_MIN_MTU)
- mtu = RTE_ETHER_MIN_MTU;
-
- if (eth_dev->data->mtu != mtu) {
- ret = lio_dev_mtu_set(eth_dev, mtu);
- if (ret)
- goto dev_mtu_set_error;
- }
+ ret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu);
+ if (ret != 0)
+ goto dev_mtu_set_error;
return 0;
int ret;
uint32_t crc_present;
uint64_t offloads;
+ uint32_t max_rx_pktlen;
offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
dev->data->rx_queues[idx] = rxq;
/* Enable scattered packets support for this queue if necessary. */
MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
- (mb_len - RTE_PKTMBUF_HEADROOM)) {
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
- uint32_t size =
- RTE_PKTMBUF_HEADROOM +
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
uint32_t sges_n;
/*
/* Make sure sges_n did not overflow. */
size = mb_len * (1 << rxq->sges_n);
size -= RTE_PKTMBUF_HEADROOM;
- if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+ if (size < max_rx_pktlen) {
rte_errno = EOVERFLOW;
ERROR("%p: too many SGEs (%u) needed to handle"
" requested maximum packet size %u",
(void *)dev,
- 1 << sges_n,
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ 1 << sges_n, max_rx_pktlen);
goto error;
}
} else {
WARN("%p: the requested maximum Rx packet size (%u) is"
" larger than a single mbuf (%u) and scattered"
" mode has not been requested",
- (void *)dev,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ (void *)dev, max_rx_pktlen,
mb_len - RTE_PKTMBUF_HEADROOM);
}
DEBUG("%p: maximum number of segments per packet: %u",
uint64_t offloads = conf->offloads |
dev->data->dev_conf.rxmode.offloads;
unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
- unsigned int max_rx_pkt_len = lro_on_queue ?
+ unsigned int max_rx_pktlen = lro_on_queue ?
dev->data->dev_conf.rxmode.max_lro_pkt_size :
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
- unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+ dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
+ unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
RTE_PKTMBUF_HEADROOM;
unsigned int max_lro_size = 0;
unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
* needed to handle max size packets, replace zero length
* with the buffer length from the pool.
*/
- tail_len = max_rx_pkt_len;
+ tail_len = max_rx_pktlen;
do {
struct mlx5_eth_rxseg *hw_seg =
&tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u, the maximum"
" supported are %u", dev->data->port_id,
- tmpl->rxq.rxseg_n, max_rx_pkt_len,
+ tmpl->rxq.rxseg_n, max_rx_pktlen,
MLX5_MAX_RXQ_NSEG);
rte_errno = ENOTSUP;
goto error;
DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
" configured and no enough mbuf space(%u) to contain "
"the maximum RX packet length(%u) with head-room(%u)",
- dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+ dev->data->port_id, idx, mb_len, max_rx_pktlen,
RTE_PKTMBUF_HEADROOM);
rte_errno = ENOSPC;
goto error;
* following conditions are met:
* - MPRQ is enabled.
* - The number of descs is more than the number of strides.
- * - max_rx_pkt_len plus overhead is less than the max size
+ * - max_rx_pktlen plus overhead is less than the max size
* of a stride or mprq_stride_size is specified by a user.
* Need to make sure that there are enough strides to encap
* the maximum packet size in case mprq_stride_size is set.
!!(offloads & DEV_RX_OFFLOAD_SCATTER);
tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
config->mprq.max_memcpy_len);
- max_lro_size = RTE_MIN(max_rx_pkt_len,
+ max_lro_size = RTE_MIN(max_rx_pktlen,
(1u << tmpl->rxq.strd_num_n) *
(1u << tmpl->rxq.strd_sz_n));
DRV_LOG(DEBUG,
dev->data->port_id, idx,
tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
} else if (tmpl->rxq.rxseg_n == 1) {
- MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
+ MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
tmpl->rxq.sges_n = 0;
- max_lro_size = max_rx_pkt_len;
+ max_lro_size = max_rx_pktlen;
} else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
unsigned int sges_n;
"port %u too many SGEs (%u) needed to handle"
" requested maximum packet size %u, the maximum"
" supported are %u", dev->data->port_id,
- 1 << sges_n, max_rx_pkt_len,
+ 1 << sges_n, max_rx_pktlen,
1u << MLX5_MAX_LOG_RQ_SEGS);
rte_errno = ENOTSUP;
goto error;
}
tmpl->rxq.sges_n = sges_n;
- max_lro_size = max_rx_pkt_len;
+ max_lro_size = max_rx_pktlen;
}
if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
DRV_LOG(WARNING,
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- MRVL_NETA_ETH_HDRS_LEN;
-
if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
priv->multiseg = 1;
return -EINVAL;
}
- dev->data->mtu = mtu;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
-
if (!priv->ppio)
/* It is OK. New MTU will be set later on mvneta_dev_start */
return 0;
struct mvneta_priv *priv = dev->data->dev_private;
struct mvneta_rxq *rxq;
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
- uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
- if (frame_size < max_rx_pkt_len) {
+ if (frame_size < max_rx_pktlen) {
MVNETA_LOG(ERR,
"Mbuf size must be increased to %u bytes to hold up "
"to %u bytes of data.",
- buf_size + max_rx_pkt_len - frame_size,
- max_rx_pkt_len);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ max_rx_pktlen + buf_size - frame_size,
+ max_rx_pktlen);
+ dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
+ MVNETA_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
}
if (dev->data->rx_queues[idx]) {
return -EINVAL;
}
- if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- MRVL_PP2_ETH_HDRS_LEN;
- if (dev->data->mtu > priv->max_mtu) {
- MRVL_LOG(ERR, "inherit MTU %u from max_rx_pkt_len %u is larger than max_mtu %u\n",
- dev->data->mtu,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
- priv->max_mtu);
- return -EINVAL;
- }
+ if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
+ MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u\n",
+ dev->data->dev_conf.rxmode.mtu,
+ priv->max_mtu);
+ return -EINVAL;
}
if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
return -EINVAL;
}
- dev->data->mtu = mtu;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
-
if (!priv->ppio)
return 0;
struct mrvl_priv *priv = dev->data->dev_private;
struct mrvl_rxq *rxq;
uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
- uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
int ret, tc, inq;
uint64_t offloads;
return -EFAULT;
}
- frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
- MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
- if (frame_size < max_rx_pkt_len) {
+ frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
+ if (frame_size < max_rx_pktlen) {
MRVL_LOG(WARNING,
"Mbuf size must be increased to %u bytes to hold up "
"to %u bytes of data.",
- buf_size + max_rx_pkt_len - frame_size,
- max_rx_pkt_len);
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
- MRVL_LOG(INFO, "Setting max rx pkt len to %u",
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ max_rx_pktlen + buf_size - frame_size,
+ max_rx_pktlen);
+ dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
+ MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
}
if (dev->data->rx_queues[idx]) {
}
if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- hw->mtu = rxmode->max_rx_pkt_len;
+ hw->mtu = dev->data->mtu;
if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
}
/* switch to jumbo mode if needed */
- if ((uint32_t)mtu > RTE_ETHER_MTU)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
-
/* writing to configuration space */
- nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+ nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
hw->mtu = mtu;
if (rc)
return rc;
- if (frame_size > OCCTX_L2_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- /* Update max_rx_pkt_len */
- data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
octeontx_log_info("Received pkt beyond maxlen %d will be dropped",
frame_size);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
/* Setup scatter mode if needed by jumbo */
- if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ if (data->mtu > buffsz) {
nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
evdev_priv->rx_offload_flags = nic->rx_offload_flags;
evdev_priv->tx_offload_flags = nic->tx_offload_flags;
- /* Setup MTU based on max_rx_pkt_len */
- nic->mtu = data->dev_conf.rxmode.max_rx_pkt_len - OCCTX_L2_OVERHEAD;
+ /* Setup MTU */
+ nic->mtu = data->mtu;
return 0;
}
octeontx_recheck_rx_offloads(rxq);
}
- /* Setting up the mtu based on max_rx_pkt_len */
+ /* Setting up the mtu */
ret = octeontx_dev_mtu_set(dev, nic->mtu);
if (ret) {
octeontx_log_err("Failed to set default MTU size %d", ret);
mbp_priv = rte_mempool_get_priv(rxq->pool);
buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
- if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+ if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
if (rc)
return rc;
- if (frame_size > NIX_L2_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- /* Update max_rx_pkt_len */
- data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
return rc;
}
{
struct rte_eth_dev_data *data = eth_dev->data;
struct otx2_eth_rxq *rxq;
- uint16_t mtu;
int rc;
rxq = data->rx_queues[0];
/* Setup scatter mode if needed by jumbo */
otx2_nix_enable_mseg_on_jumbo(rxq);
- /* Setup MTU based on max_rx_pkt_len */
- mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;
-
- rc = otx2_nix_mtu_set(eth_dev, mtu);
+ rc = otx2_nix_mtu_set(eth_dev, data->mtu);
if (rc)
otx2_err("Failed to set default MTU size %d", rc);
static int
pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
- int ret;
struct pfe_eth_priv_s *priv = dev->data->dev_private;
uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
/*TODO Support VLAN*/
- ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
- if (!ret)
- dev->data->mtu = mtu;
-
- return ret;
+ return gemac_set_rx(priv->EMAC_baseaddr, frame_size);
}
/* pfe_eth_enet_addr_byte_mac
return -ENOMEM;
}
- /* If jumbo enabled adjust MTU */
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- eth_dev->data->mtu =
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
- RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
-
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_dev_info dev_info = {0};
struct qede_fastpath *fp;
- uint32_t max_rx_pkt_len;
uint32_t frame_size;
uint16_t bufsz;
bool restart = false;
DP_ERR(edev, "Error during getting ethernet device info\n");
return rc;
}
- max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
- frame_size = max_rx_pkt_len;
+
+ frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
fp->rxq->rx_buf_size = rc;
}
}
- if (frame_size > QEDE_ETH_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
dev->data->dev_started = 1;
}
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
-
return 0;
}
struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct qede_rx_queue *rxq;
- uint16_t max_rx_pkt_len;
+ uint16_t max_rx_pktlen;
uint16_t bufsz;
int rc;
dev->data->rx_queues[qid] = NULL;
}
- max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+ max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
/* Fix up RX buffer size */
bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
/* cache align the mbuf size to simplfy rx_buf_size calculation */
bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
- (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+ (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
if (!dev->data->scattered_rx) {
DP_INFO(edev, "Forcing scatter-gather mode\n");
dev->data->scattered_rx = 1;
}
}
- rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+ rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
if (rc < 0)
return rc;
/*
* The driver does not use it, but other PMDs update jumbo frame
- * flag and max_rx_pkt_len when MTU is set.
+ * flag when MTU is set.
*/
if (mtu > RTE_ETHER_MTU) {
struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
}
- dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
-
sfc_adapter_unlock(sa);
sfc_log_init(sa, "done");
{
const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
struct sfc_port *port = &sa->port;
- const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
sfc_log_init(sa, "entry");
- if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- port->pdu = rxmode->max_rx_pkt_len;
- else
- port->pdu = EFX_MAC_PDU(dev_data->mtu);
+ port->pdu = EFX_MAC_PDU(dev_data->mtu);
return 0;
}
{
struct pmd_internals *pmd = dev->data->dev_private;
struct ifreq ifr = { .ifr_mtu = mtu };
- int err = 0;
- err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
- if (!err)
- dev->data->mtu = mtu;
-
- return err;
+ return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
}
static int
(frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
return -EINVAL;
- if (frame_size > NIC_HW_L2_MAX_LEN)
+ if (mtu > RTE_ETHER_MTU)
rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (nicvf_mbox_update_hw_max_frs(nic, mtu))
return -EINVAL;
- /* Update max_rx_pkt_len */
- rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
nic->mtu = mtu;
for (i = 0; i < nic->sqs_count; i++)
}
/* Setup scatter mode if needed by jumbo */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * VLAN_TAG_SIZE > buffsz)
+ if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
dev->data->scattered_rx = 1;
if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
dev->data->scattered_rx = 1;
- /* Setup MTU based on max_rx_pkt_len or default */
- mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
- dev->data->dev_conf.rxmode.max_rx_pkt_len
- - RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
+ /* Setup MTU */
+ mtu = dev->data->mtu;
if (nicvf_dev_set_mtu(dev, mtu)) {
PMD_INIT_LOG(ERR, "Failed to set default mtu size");
return -EINVAL;
}
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+ /* switch to jumbo mode if needed */
+ if (mtu > RTE_ETHER_MTU)
+ dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ else
+ dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
if (hw->mode)
wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
#define TXGBE_5TUPLE_MAX_PRI 7
#define TXGBE_5TUPLE_MIN_PRI 1
+
+/* The overhead from MTU to max frame size. */
+#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+
#define TXGBE_RSS_OFFLOAD_ALL ( \
ETH_RSS_IPV4 | \
ETH_RSS_NONFRAG_IPV4_TCP | \
if (txgbevf_rlpml_set_vf(hw, max_frame))
return -EINVAL;
- /* update max frame size */
- dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
return 0;
}
/*
* Configure jumbo frame support, if any.
*/
- if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
- TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
- } else {
- wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
- TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
- }
+ wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+ TXGBE_FRMSZ_MAX(dev->data->mtu + TXGBE_ETH_OVERHEAD));
/*
* If loopback mode is configured, set LPBK bit.
wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
/* It adds dual VLAN length for supporting dual VLAN */
- if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
- 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+ if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
+ 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
dev->data->scattered_rx = 1;
if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
* VF packets received can work in all cases.
*/
if (txgbevf_rlpml_set_vf(hw,
- (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+ (uint16_t)dev->data->mtu + TXGBE_ETH_OVERHEAD)) {
PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
- dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ dev->data->mtu + TXGBE_ETH_OVERHEAD);
return -EINVAL;
}
if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
/* It adds dual VLAN length for supporting dual VLAN */
- (rxmode->max_rx_pkt_len +
+ (dev->data->mtu + TXGBE_ETH_OVERHEAD +
2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
if (!dev->data->scattered_rx)
PMD_INIT_LOG(DEBUG, "forcing scatter mode");
}
hw->max_rx_pkt_len = frame_size;
- dev->data->dev_conf.rxmode.max_rx_pkt_len = hw->max_rx_pkt_len;
return 0;
}
return ret;
}
- if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
- (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len))
+ if (rxmode->mtu > hw->max_mtu)
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
- if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
- hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
- else
- hw->max_rx_pkt_len = ether_hdr_len + dev->data->mtu;
+ hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
static const struct rte_eth_conf port_conf_default = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
},
.rx_adv_conf = {
.rss_conf = {
} parm_config;
const char cb_port_delim[] = ":";
-/* Ethernet ports configured with default settings using struct. 8< */
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- },
-};
-/* >8 End of configuration of Ethernet ports. */
-
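With max_rx_pkt_len gone from struct rte_eth_rxmode, a statically initialized default configuration no longer buys the examples anything: a zeroed rte_eth_conf is a valid starting point. A minimal sketch of the pattern the examples switch to, assuming (as this series arranges) that a zero rxmode.mtu is treated as the default RTE_ETHER_MTU by rte_eth_dev_configure():

	#include <string.h>
	#include <rte_ethdev.h>

	static int
	configure_default_mtu(uint16_t port_id)
	{
		struct rte_eth_conf port_conf;

		/* Zeroed config: rxmode.mtu == 0 requests the default MTU. */
		memset(&port_conf, 0, sizeof(port_conf));
		/* port_conf.rxmode.mtu = 9000; -- would request jumbo frames */
		return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	}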
/* Creation of flow classifier object. 8< */
struct flow_classifier {
struct rte_flow_classifier *cls;
static inline int
port_init(uint8_t port, struct rte_mempool *mbuf_pool)
{
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
struct rte_ether_addr addr;
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
static const struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN
},
.rx_adv_conf = {
.rss_conf = {
static struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+ .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_SCATTER |
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
- dev_info.max_rx_pktlen,
- local_port_conf.rxmode.max_rx_pkt_len);
+ local_port_conf.rxmode.mtu = RTE_MIN(
+ dev_info.max_mtu,
+ local_port_conf.rxmode.mtu);
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
}
/* set the mtu to the maximum received packet size */
- ret = rte_eth_dev_set_mtu(portid,
- local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
+ ret = rte_eth_dev_set_mtu(portid, local_port_conf.rxmode.mtu);
if (ret < 0) {
printf("\n");
rte_exit(EXIT_FAILURE, "Set MTU failed: "
.link_speeds = 0,
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
.split_hdr_size = 0, /* Header split buffer size */
},
.rx_adv_conf = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+ .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = (DEV_RX_OFFLOAD_CHECKSUM |
DEV_RX_OFFLOAD_JUMBO_FRAME),
/* mbufs stored in the fragment table. 8< */
nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
- nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
+ nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+ + BUF_SIZE - 1) / BUF_SIZE;
nb_mbuf *= 2; /* ipv4 and ipv6 */
nb_mbuf += nb_rxd + nb_txd;
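A worked example of the mbuf-count arithmetic above, assuming this example's JUMBO_FRAME_MAX_SIZE of 0x2600 (9728 bytes) and a BUF_SIZE of 2048; both values come from the example sources, not from this patch:

	#include <assert.h>

	int main(void)
	{
		unsigned int mtu = 9728 - 14 - 4;	/* rxmode.mtu = 9710 */
		unsigned int frame = mtu + 14 + 4;	/* back to 9728 */
		unsigned int per_frame = (frame + 2048 - 1) / 2048;	/* ceil */

		/* Five mbufs per reassembled jumbo frame, the same count the
		 * removed max_rx_pkt_len expression produced. */
		assert(per_frame == 5);
		return 0;
	}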
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
- dev_info.max_rx_pktlen,
- local_port_conf.rxmode.max_rx_pkt_len);
+ local_port_conf.rxmode.mtu = RTE_MIN(
+ dev_info.max_mtu,
+ local_port_conf.rxmode.mtu);
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
static void
port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
{
- uint32_t frame_size;
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
uint16_t nb_tx_queue, nb_rx_queue;
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
- frame_size = MTU_TO_FRAMELEN(mtu_size);
- if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
+ if (mtu_size > RTE_ETHER_MTU)
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+ local_port_conf.rxmode.mtu = mtu_size;
if (multi_seg_required()) {
local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
static struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+ .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
},
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
- local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
- dev_info.max_rx_pktlen,
- local_port_conf.rxmode.max_rx_pkt_len);
+ local_port_conf.rxmode.mtu = RTE_MIN(
+ dev_info.max_mtu,
+ local_port_conf.rxmode.mtu);
/* get the lcore_id for this port */
while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
memcpy(&conf, &port_conf, sizeof(conf));
/* Set new MTU */
- if (new_mtu > RTE_ETHER_MAX_LEN)
+ if (new_mtu > RTE_ETHER_MTU)
conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
else
conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- /* mtu + length of header + length of FCS = max pkt length */
- conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
- KNI_ENET_FCS_SIZE;
+ conf.rxmode.mtu = new_mtu;
ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
if (ret < 0) {
RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
-};
-
/* l2fwd-cat.c: CAT enabled, basic DPDK skeleton forwarding example. */
/*
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
uint16_t q;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
/* Configure the Ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
},
};
+static uint32_t max_pkt_len;
+
static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
/* ethernet addresses of ports */
OPT_CONFIG_NUM = 256,
#define OPT_NONUMA "no-numa"
OPT_NONUMA_NUM,
-#define OPT_ENBJMO "enable-jumbo"
- OPT_ENBJMO_NUM,
+#define OPT_MAX_PKT_LEN "max-pkt-len"
+ OPT_MAX_PKT_LEN_NUM,
#define OPT_RULE_IPV4 "rule_ipv4"
OPT_RULE_IPV4_NUM,
#define OPT_RULE_IPV6 "rule_ipv6"
usage_acl_alg(alg, sizeof(alg));
printf("%s [EAL options] -- -p PORTMASK -P"
- "--"OPT_RULE_IPV4"=FILE"
- "--"OPT_RULE_IPV6"=FILE"
+ " --"OPT_RULE_IPV4"=FILE"
+ " --"OPT_RULE_IPV6"=FILE"
" [--"OPT_CONFIG" (port,queue,lcore)[,(port,queue,lcore]]"
- " [--"OPT_ENBJMO" [--max-pkt-len PKTLEN]]\n"
+ " [--"OPT_MAX_PKT_LEN" PKTLEN]\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " -P : enable promiscuous mode\n"
- " --"OPT_CONFIG": (port,queue,lcore): "
- "rx queues configuration\n"
+ " -P: enable promiscuous mode\n"
+ " --"OPT_CONFIG" (port,queue,lcore): rx queues configuration\n"
" --"OPT_NONUMA": optional, disable numa awareness\n"
- " --"OPT_ENBJMO": enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n"
- " --"OPT_RULE_IPV4"=FILE: specify the ipv4 rules entries "
- "file. "
+ " --"OPT_MAX_PKT_LEN" PKTLEN: maximum packet length in decimal (64-9600)\n"
+ " --"OPT_RULE_IPV4"=FILE: specify the ipv4 rules entries file. "
"Each rule occupy one line. "
"2 kinds of rules are supported. "
"One is ACL entry at while line leads with character '%c', "
- "another is route entry at while line leads with "
- "character '%c'.\n"
- " --"OPT_RULE_IPV6"=FILE: specify the ipv6 rules "
- "entries file.\n"
+ "another is route entry at while line leads with character '%c'.\n"
+ " --"OPT_RULE_IPV6"=FILE: specify the ipv6 rules entries file.\n"
" --"OPT_ALG": ACL classify method to use, one of: %s\n",
prgname, ACL_LEAD_CHAR, ROUTE_LEAD_CHAR, alg);
}
int option_index;
char *prgname = argv[0];
static struct option lgopts[] = {
- {OPT_CONFIG, 1, NULL, OPT_CONFIG_NUM },
- {OPT_NONUMA, 0, NULL, OPT_NONUMA_NUM },
- {OPT_ENBJMO, 0, NULL, OPT_ENBJMO_NUM },
- {OPT_RULE_IPV4, 1, NULL, OPT_RULE_IPV4_NUM },
- {OPT_RULE_IPV6, 1, NULL, OPT_RULE_IPV6_NUM },
- {OPT_ALG, 1, NULL, OPT_ALG_NUM },
- {OPT_ETH_DEST, 1, NULL, OPT_ETH_DEST_NUM },
- {NULL, 0, 0, 0 }
+ {OPT_CONFIG, 1, NULL, OPT_CONFIG_NUM },
+ {OPT_NONUMA, 0, NULL, OPT_NONUMA_NUM },
+ {OPT_MAX_PKT_LEN, 1, NULL, OPT_MAX_PKT_LEN_NUM },
+ {OPT_RULE_IPV4, 1, NULL, OPT_RULE_IPV4_NUM },
+ {OPT_RULE_IPV6, 1, NULL, OPT_RULE_IPV6_NUM },
+ {OPT_ALG, 1, NULL, OPT_ALG_NUM },
+ {OPT_ETH_DEST, 1, NULL, OPT_ETH_DEST_NUM },
+ {NULL, 0, 0, 0 }
};
argvopt = argv;
numa_on = 0;
break;
- case OPT_ENBJMO_NUM:
- {
- struct option lenopts = {
- "max-pkt-len",
- required_argument,
- 0,
- 0
- };
-
- printf("jumbo frame is enabled\n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MULTI_SEGS;
-
- /*
- * if no max-pkt-len set, then use the
- * default value RTE_ETHER_MAX_LEN
- */
- if (getopt_long(argc, argvopt, "",
- &lenopts, &option_index) == 0) {
- ret = parse_max_pkt_len(optarg);
- if ((ret < 64) ||
- (ret > MAX_JUMBO_PKT_LEN)) {
- printf("invalid packet "
- "length\n");
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
- printf("set jumbo frame max packet length "
- "to %u\n",
- (unsigned int)
- port_conf.rxmode.max_rx_pkt_len);
+ case OPT_MAX_PKT_LEN_NUM:
+ printf("Custom frame size is configured\n");
+ max_pkt_len = parse_max_pkt_len(optarg);
break;
- }
+
case OPT_RULE_IPV4_NUM:
parm_config.rule_ipv4_name = optarg;
break;
}
}
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+ struct rte_eth_dev_info *dev_info)
+{
+ uint32_t overhead_len;
+
+ if (max_pkt_len == 0)
+ return 0;
+
+ if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+ return -1;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+ if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ return 0;
+}
+
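A worked example for the two helpers above, with illustrative device limits (not taken from any specific PMD); it assumes eth_dev_get_overhead_len() is in scope:

	#include <assert.h>
	#include <stdint.h>

	static void
	overhead_len_example(void)
	{
		/* Device reports both limits: the overhead is their
		 * difference, here 9600 - 9574 = 26 bytes (header + CRC +
		 * two VLAN tags). */
		assert(eth_dev_get_overhead_len(9600, 9574) == 26);

		/* Device does not report max_mtu: fall back to Ethernet
		 * header + CRC. */
		assert(eth_dev_get_overhead_len(9600, UINT16_MAX) == 14 + 4);

		/* "--max-pkt-len 9000" thus maps to an MTU of 8974 on the
		 * first device and 8982 on the second. */
	}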
int
main(int argc, char **argv)
{
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
+ ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Invalid max packet length: %u (port %u)\n",
+ max_pkt_len, portid);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.rx_adv_conf = {
},
};
+static uint32_t max_pkt_len;
+
static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
static struct rte_node_ethdev_config ethdev_conf[RTE_MAX_ETHPORTS];
" [-P]"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
- " [--enable-jumbo [--max-pkt-len PKTLEN]]"
+ " [--max-pkt-len PKTLEN]"
" [--no-numa]"
" [--per-port-pool]\n\n"
" --config (port,queue,lcore): Rx queue configuration\n"
" --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for "
"port X\n"
- " --enable-jumbo: Enable jumbo frames\n"
- " --max-pkt-len: Under the premise of enabling jumbo,\n"
- " maximum packet length in decimal (64-9600)\n"
+ " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
" --no-numa: Disable numa awareness\n"
" --per-port-pool: Use separate buffer pool per port\n\n",
prgname);
#define CMD_LINE_OPT_CONFIG "config"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
-#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
+#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
enum {
/* Long options mapped to a short option */
CMD_LINE_OPT_CONFIG_NUM,
CMD_LINE_OPT_ETH_DEST_NUM,
CMD_LINE_OPT_NO_NUMA_NUM,
- CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+ CMD_LINE_OPT_MAX_PKT_LEN_NUM,
CMD_LINE_OPT_PARSE_PER_PORT_POOL,
};
{CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
- {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+ {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
{NULL, 0, 0, 0},
};
numa_on = 0;
break;
- case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
- const struct option lenopts = {"max-pkt-len",
- required_argument, 0, 0};
-
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
- /*
- * if no max-pkt-len set, use the default
- * value RTE_ETHER_MAX_LEN.
- */
- if (getopt_long(argc, argvopt, "", &lenopts,
- &option_index) == 0) {
- ret = parse_max_pkt_len(optarg);
- if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
- fprintf(stderr, "Invalid maximum "
- "packet length\n");
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
+ case CMD_LINE_OPT_MAX_PKT_LEN_NUM: {
+ max_pkt_len = parse_max_pkt_len(optarg);
break;
}
}
/* >8 End of main processing loop. */
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+ struct rte_eth_dev_info *dev_info)
+{
+ uint32_t overhead_len;
+
+ if (max_pkt_len == 0)
+ return 0;
+
+ if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+ return -1;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+ if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ return 0;
+}
+
int
main(int argc, char **argv)
{
nb_rx_queue, n_tx_queue);
rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Invalid max packet length: %u (port %u)\n",
+ max_pkt_len, portid);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
}
};
+static uint32_t max_pkt_len;
+
static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
" [--config (port,queue,lcore)[,(port,queue,lcore]]"
" [--high-perf-cores CORELIST"
" [--perf-config (port,queue,hi_perf,lcore_index)[,(port,queue,hi_perf,lcore_index]]"
- " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+ " [--max-pkt-len PKTLEN]\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
- " -P : enable promiscuous mode\n"
+ " -P: enable promiscuous mode\n"
" --config (port,queue,lcore): rx queues configuration\n"
" --high-perf-cores CORELIST: list of high performance cores\n"
" --perf-config: similar as config, cores specified as indices"
" for bins containing high or regular performance cores\n"
" --no-numa: optional, disable numa awareness\n"
- " --enable-jumbo: enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
" --parse-ptype: parse packet type by software\n"
" --legacy: use legacy interrupt-based scaling\n"
" --empty-poll: enable empty poll detection"
#define CMD_LINE_OPT_INTERRUPT_ONLY "interrupt-only"
#define CMD_LINE_OPT_TELEMETRY "telemetry"
#define CMD_LINE_OPT_PMD_MGMT "pmd-mgmt"
+#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
/* Parse the argument given in the command line of the application */
static int
{"perf-config", 1, 0, 0},
{"high-perf-cores", 1, 0, 0},
{"no-numa", 0, 0, 0},
- {"enable-jumbo", 0, 0, 0},
+ {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, 0},
{CMD_LINE_OPT_EMPTY_POLL, 1, 0, 0},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
{CMD_LINE_OPT_LEGACY, 0, 0, 0},
}
if (!strncmp(lgopts[option_index].name,
- "enable-jumbo", 12)) {
- struct option lenopts =
- {"max-pkt-len", required_argument, \
- 0, 0};
-
- printf("jumbo frame is enabled \n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MULTI_SEGS;
-
- /**
- * if no max-pkt-len set, use the default value
- * RTE_ETHER_MAX_LEN
- */
- if (0 == getopt_long(argc, argvopt, "",
- &lenopts, &option_index)) {
- ret = parse_max_pkt_len(optarg);
- if ((ret < 64) ||
- (ret > MAX_JUMBO_PKT_LEN)){
- printf("invalid packet "
- "length\n");
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
- printf("set jumbo frame "
- "max packet length to %u\n",
- (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+ CMD_LINE_OPT_MAX_PKT_LEN,
+ sizeof(CMD_LINE_OPT_MAX_PKT_LEN))) {
+ printf("Custom frame size is configured\n");
+ max_pkt_len = parse_max_pkt_len(optarg);
}
if (!strncmp(lgopts[option_index].name,
}
}
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+ struct rte_eth_dev_info *dev_info)
+{
+ uint32_t overhead_len;
+
+ if (max_pkt_len == 0)
+ return 0;
+
+ if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+ return -1;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+ if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ return 0;
+}
+
/* Power library initialized in the main routine. 8< */
int
main(int argc, char **argv)
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
+ ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Invalid max packet length: %u (port %u)\n",
+ max_pkt_len, portid);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
},
};
+static uint32_t max_pkt_len;
+
static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
static uint8_t lkp_per_socket[NB_SOCKETS];
" [--lookup]"
" --config (port,queue,lcore)[,(port,queue,lcore)]"
" [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
- " [--enable-jumbo [--max-pkt-len PKTLEN]]"
+ " [--max-pkt-len PKTLEN]"
" [--no-numa]"
" [--hash-entry-num]"
" [--ipv6]"
" Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
" --config (port,queue,lcore): Rx queue configuration\n"
" --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
- " --enable-jumbo: Enable jumbo frames\n"
- " --max-pkt-len: Under the premise of enabling jumbo,\n"
- " maximum packet length in decimal (64-9600)\n"
+ " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
" --no-numa: Disable numa awareness\n"
" --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
" --ipv6: Set if running ipv6 packets\n"
#define CMD_LINE_OPT_ETH_DEST "eth-dest"
#define CMD_LINE_OPT_NO_NUMA "no-numa"
#define CMD_LINE_OPT_IPV6 "ipv6"
-#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
+#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
#define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
#define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
#define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
CMD_LINE_OPT_ETH_DEST_NUM,
CMD_LINE_OPT_NO_NUMA_NUM,
CMD_LINE_OPT_IPV6_NUM,
- CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+ CMD_LINE_OPT_MAX_PKT_LEN_NUM,
CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
CMD_LINE_OPT_PARSE_PTYPE_NUM,
CMD_LINE_OPT_PARSE_PER_PORT_POOL,
{CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
{CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
{CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
- {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+ {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
{CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
{CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
{CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
ipv6 = 1;
break;
- case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
- const struct option lenopts = {
- "max-pkt-len", required_argument, 0, 0
- };
-
- port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
- port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
- /*
- * if no max-pkt-len set, use the default
- * value RTE_ETHER_MAX_LEN.
- */
- if (getopt_long(argc, argvopt, "",
- &lenopts, &option_index) == 0) {
- ret = parse_max_pkt_len(optarg);
- if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
- fprintf(stderr,
- "invalid maximum packet length\n");
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
+ case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
+ max_pkt_len = parse_max_pkt_len(optarg);
break;
- }
case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
ret = parse_hash_entry_number(optarg);
return 0;
}
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+ struct rte_eth_dev_info *dev_info)
+{
+ uint32_t overhead_len;
+
+ if (max_pkt_len == 0)
+ return 0;
+
+ if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+ return -1;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+ if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ return 0;
+}
+
static void
l3fwd_poll_resource_setup(void)
{
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
+ ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Invalid max packet length: %u (port %u)\n",
+ max_pkt_len, portid);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
},
};
+static uint32_t max_pkt_len;
+
static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
printf("%s [EAL options] -- -p PORTMASK -P"
" [--rx (port,queue,lcore,thread)[,(port,queue,lcore,thread]]"
" [--tx (lcore,thread)[,(lcore,thread]]"
- " [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+ " [--max-pkt-len PKTLEN]"
" [--parse-ptype]\n\n"
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
" --no-numa: optional, disable numa awareness\n"
" --ipv6: optional, specify it if running ipv6 packets\n"
- " --enable-jumbo: enable jumbo frame"
- " which max packet len is PKTLEN in decimal (64-9600)\n"
+ " --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
" --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n"
" --no-lthreads: turn off lthread model\n"
" --parse-ptype: set to use software to analyze packet type\n\n",
OPT_NO_NUMA_NUM,
#define OPT_IPV6 "ipv6"
OPT_IPV6_NUM,
-#define OPT_ENABLE_JUMBO "enable-jumbo"
- OPT_ENABLE_JUMBO_NUM,
+#define OPT_MAX_PKT_LEN "max-pkt-len"
+ OPT_MAX_PKT_LEN_NUM,
#define OPT_HASH_ENTRY_NUM "hash-entry-num"
OPT_HASH_ENTRY_NUM_NUM,
#define OPT_NO_LTHREADS "no-lthreads"
{OPT_ETH_DEST, 1, NULL, OPT_ETH_DEST_NUM },
{OPT_NO_NUMA, 0, NULL, OPT_NO_NUMA_NUM },
{OPT_IPV6, 0, NULL, OPT_IPV6_NUM },
- {OPT_ENABLE_JUMBO, 0, NULL, OPT_ENABLE_JUMBO_NUM },
+ {OPT_MAX_PKT_LEN, 1, NULL, OPT_MAX_PKT_LEN_NUM },
{OPT_HASH_ENTRY_NUM, 1, NULL, OPT_HASH_ENTRY_NUM_NUM },
{OPT_NO_LTHREADS, 0, NULL, OPT_NO_LTHREADS_NUM },
{OPT_PARSE_PTYPE, 0, NULL, OPT_PARSE_PTYPE_NUM },
parse_ptype_on = 1;
break;
- case OPT_ENABLE_JUMBO_NUM:
- {
- struct option lenopts = {"max-pkt-len",
- required_argument, 0, 0};
-
- printf("jumbo frame is enabled - disabling simple TX path\n");
- port_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- port_conf.txmode.offloads |=
- DEV_TX_OFFLOAD_MULTI_SEGS;
-
- /* if no max-pkt-len set, use the default value
- * RTE_ETHER_MAX_LEN
- */
- if (getopt_long(argc, argvopt, "", &lenopts,
- &option_index) == 0) {
-
- ret = parse_max_pkt_len(optarg);
- if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)) {
- printf("invalid packet length\n");
- print_usage(prgname);
- return -1;
- }
- port_conf.rxmode.max_rx_pkt_len = ret;
- }
- printf("set jumbo frame max packet length to %u\n",
- (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+ case OPT_MAX_PKT_LEN_NUM:
+ max_pkt_len = parse_max_pkt_len(optarg);
break;
- }
+
#if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
case OPT_HASH_ENTRY_NUM_NUM:
ret = parse_hash_entry_number(optarg);
}
}
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+ struct rte_eth_dev_info *dev_info)
+{
+ uint32_t overhead_len;
+
+ if (max_pkt_len == 0)
+ return 0;
+
+ if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+ return -1;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+ if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+ conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+ conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ return 0;
+}
+
int
main(int argc, char **argv)
{
"Error during getting device (port %u) info: %s\n",
portid, strerror(-ret));
+ ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Invalid max packet length: %u (port %u)\n",
+ max_pkt_len, portid);
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
echo "1.1 1 L-core per pcore (N=2)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,0,0)" \
--tx="(1,0)" \
--stat-lcore 2 \
echo "1.2 1 L-core per pcore (N=4)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--stat-lcore 4 \
echo "1.3 1 L-core per pcore (N=8)"
./build/l3fwd-thread -c 1ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,1,1)(1,0,2,2)(1,1,3,3)" \
--tx="(4,0)(5,1)(6,2)(7,3)" \
--stat-lcore 8 \
echo "1.3 1 L-core per pcore (N=16)"
./build/l3fwd-thread -c 3ffff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,1,1)(0,2,2,2)(0,3,3,3)(1,0,4,4)(1,1,5,5)(1,2,6,6)(1,3,7,7)" \
--tx="(8,0)(9,1)(10,2)(11,3)(12,4)(13,5)(14,6)(15,7)" \
--stat-lcore 16 \
echo "2.1 N L-core per pcore (N=2)"
./build/l3fwd-thread -c ff -n 2 --lcores="2,(0-1)@0" -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,0,0)" \
--tx="(1,0)" \
--stat-lcore 2 \
echo "2.2 N L-core per pcore (N=4)"
./build/l3fwd-thread -c ff -n 2 --lcores="(0-3)@0,4" -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,1,1)" \
--tx="(2,0)(3,1)" \
--stat-lcore 4 \
echo "2.3 N L-core per pcore (N=8)"
./build/l3fwd-thread -c 3ffff -n 2 --lcores="(0-7)@0,8" -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,1,1)(1,0,2,2)(1,1,3,3)" \
--tx="(4,0)(5,1)(6,2)(7,3)" \
--stat-lcore 8 \
echo "2.3 N L-core per pcore (N=16)"
./build/l3fwd-thread -c 3ffff -n 2 --lcores="(0-15)@0,16" -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,1,1)(0,2,2,2)(0,3,3,3)(1,0,4,4)(1,1,5,5)(1,2,6,6)(1,3,7,7)" \
--tx="(8,0)(9,1)(10,2)(11,3)(12,4)(13,5)(14,6)(15,7)" \
--stat-lcore 16 \
echo "3.1 N L-threads per pcore (N=2)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,0,0)" \
--tx="(0,0)" \
--stat-lcore 1
echo "3.2 N L-threads per pcore (N=4)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(1,0,0,1)" \
--tx="(0,0)(0,1)" \
--stat-lcore 1
echo "3.2 N L-threads per pcore (N=8)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,0,1)(1,0,0,2)(1,1,0,3)" \
--tx="(0,0)(0,1)(0,2)(0,3)" \
--stat-lcore 1
echo "3.2 N L-threads per pcore (N=16)"
./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
- --enable-jumbo --max-pkt-len 1500 \
+ --max-pkt-len 1500 \
--rx="(0,0,0,0)(0,1,0,1)(0,2,0,2)(0,0,0,3)(1,0,0,4)(1,1,0,5)(1,2,0,6)(1,3,0,7)" \
--tx="(0,0)(0,1)(0,2)(0,3)(0,4)(0,5)(0,6)(0,7)" \
--stat-lcore 1
.link_speeds = 0,
.rxmode = {
.mq_mode = ETH_MQ_RX_NONE,
- .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+ .mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU (8982 bytes) */
.split_hdr_size = 0, /* Header split buffer size */
},
.rx_adv_conf = {
uint8_t ptp_enabled_port_nb;
static uint8_t ptp_enabled_ports[RTE_MAX_ETHPORTS];
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- },
-};
-
static const struct rte_ether_addr ether_multicast = {
.addr_bytes = {0x01, 0x1b, 0x19, 0x0, 0x0, 0x0}
};
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
const uint16_t rx_rings = 1;
const uint16_t tx_rings = 1;
int retval;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
static struct rte_eth_conf port_conf = {
.rxmode = {
.mq_mode = ETH_MQ_RX_RSS,
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
.offloads = DEV_RX_OFFLOAD_CHECKSUM,
},
static struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
.split_hdr_size = 0,
},
.txmode = {
static const char usage[] =
"%s EAL_ARGS -- [-t]\n";
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- },
-};
-
static struct {
uint64_t total_cycles;
uint64_t total_queue_cycles;
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
const uint16_t rx_rings = 1, tx_rings = 1;
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
#define MBUF_CACHE_SIZE 250
#define BURST_SIZE 32
-/* Configuration of ethernet ports. 8< */
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- },
-};
-/* >8 End of configuration of ethernet ports. */
-
/* basicfwd.c: Basic DPDK skeleton forwarding example. */
/*
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
const uint16_t rx_rings = 1, tx_rings = 1;
uint16_t nb_rxd = RX_RING_SIZE;
uint16_t nb_txd = TX_RING_SIZE;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
#define BURST_RX_RETRIES 4 /* Number of retries on RX. */
#define JUMBO_FRAME_MAX_SIZE 0x2600
+#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
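    /* 0x2600 is 9728 bytes on the wire, so MAX_MTU works out to
     * 9728 - (14 + 4) = 9710. */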
/* State of virtio device. */
#define DEVICE_MAC_LEARNING 0
if (ret) {
vmdq_conf_default.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
- vmdq_conf_default.rxmode.max_rx_pkt_len
- = JUMBO_FRAME_MAX_SIZE;
+ vmdq_conf_default.rxmode.mtu = MAX_MTU;
}
break;
static uint32_t enabled_port_mask;
static volatile bool force_quit;
-/****************/
-static const struct rte_eth_conf port_conf_default = {
- .rxmode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- },
-};
-
static inline int
port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
- struct rte_eth_conf port_conf = port_conf_default;
+ struct rte_eth_conf port_conf;
const uint16_t rx_rings = 1, tx_rings = 1;
int retval;
uint16_t q;
if (!rte_eth_dev_is_valid_port(port))
return -1;
+ memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
retval = rte_eth_dev_info_get(port, &dev_info);
if (retval != 0) {
printf("Error during getting device (port %u) info: %s\n",
return ret;
}
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+ uint32_t overhead_len;
+
+ if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+ overhead_len = max_rx_pktlen - max_mtu;
+ else
+ overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return overhead_len;
+}
+
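The same fallback logic now feeds the configure-time validation below; tracing assumed numbers through rte_eth_dev_configure():

    /* Assumed device capabilities: max_rx_pktlen = 1518, max_mtu = 1500. */
    overhead_len = eth_dev_get_overhead_len(1518, 1500); /* 18 */
    /* rxmode.mtu = 1500 -> frame length 1518: accepted.
     * rxmode.mtu = 9000 -> frame length 9018 > 1518: -EINVAL. */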
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_conf orig_conf;
- uint16_t overhead_len;
+ uint32_t max_rx_pktlen;
+ uint32_t overhead_len;
int diag;
int ret;
uint16_t old_mtu;
goto rollback;
/* Get the real Ethernet overhead length */
- if (dev_info.max_mtu != UINT16_MAX &&
- dev_info.max_rx_pktlen > dev_info.max_mtu)
- overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
- else
- overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
/* If number of queues specified by application for both Rx and Tx is
* zero, use driver preferred values. This cannot be done individually
}
/*
- * If jumbo frames are enabled, check that the maximum RX packet
- * length is supported by the configured device.
+ * Check that the maximum RX packet length is supported by the
+ * configured device.
*/
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
- if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
- port_id, dev_conf->rxmode.max_rx_pkt_len,
- dev_info.max_rx_pktlen);
- ret = -EINVAL;
- goto rollback;
- } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
- port_id, dev_conf->rxmode.max_rx_pkt_len,
- (unsigned int)RTE_ETHER_MIN_LEN);
- ret = -EINVAL;
- goto rollback;
- }
+ if (dev_conf->rxmode.mtu == 0)
+ dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
+ max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
+ if (max_rx_pktlen > dev_info.max_rx_pktlen) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u max_rx_pktlen %u > max valid value %u\n",
+ port_id, max_rx_pktlen, dev_info.max_rx_pktlen);
+ ret = -EINVAL;
+ goto rollback;
+ } else if (max_rx_pktlen < RTE_ETHER_MIN_LEN) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%u max_rx_pktlen %u < min valid value %u\n",
+ port_id, max_rx_pktlen, RTE_ETHER_MIN_LEN);
+ ret = -EINVAL;
+ goto rollback;
+ }
- /* Scale the MTU size to adapt max_rx_pkt_len */
- dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
- overhead_len;
- } else {
- uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
- if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
- pktlen > RTE_ETHER_MTU + overhead_len)
+ if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+ if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
+ dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
/* Use default value */
- dev->data->dev_conf.rxmode.max_rx_pkt_len =
- RTE_ETHER_MTU + overhead_len;
+ dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
}
+ dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
+
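    /* Editor's sketch, with assumed numbers: an application that leaves
     * rxmode.mtu at 0 gets RTE_ETHER_MTU (1500); with 18 bytes of
     * overhead the frame length validated above is then 1518, i.e.
     * RTE_ETHER_MAX_LEN. */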
/*
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
if (dev_conf->rxmode.max_lro_pkt_size == 0)
- dev->data->dev_conf.rxmode.max_lro_pkt_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
ret = eth_dev_check_lro_pkt_size(port_id,
dev->data->dev_conf.rxmode.max_lro_pkt_size,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ max_rx_pktlen,
dev_info.max_lro_pkt_size);
if (ret != 0)
goto rollback;
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ uint32_t overhead_len;
+ uint32_t max_rx_pktlen;
+ int ret;
+
+ /* Get the real Ethernet overhead length */
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
+ max_rx_pktlen = dev->data->mtu + overhead_len;
if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
- dev->data->dev_conf.rxmode.max_lro_pkt_size =
- dev->data->dev_conf.rxmode.max_rx_pkt_len;
- int ret = eth_dev_check_lro_pkt_size(port_id,
+ dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
+ ret = eth_dev_check_lro_pkt_size(port_id,
dev->data->dev_conf.rxmode.max_lro_pkt_size,
- dev->data->dev_conf.rxmode.max_rx_pkt_len,
+ max_rx_pktlen,
dev_info.max_lro_pkt_size);
if (ret != 0)
return ret;
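    /* Editor's sketch, with assumed numbers: for dev->data->mtu = 1500 and
     * overhead_len = 18, an unset max_lro_pkt_size now defaults to 1518;
     * it previously defaulted to the removed max_rx_pkt_len field. */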
struct rte_eth_rxmode {
/** The multi-queue packet distribution mode to be used, e.g. RSS. */
enum rte_eth_rx_mq_mode mq_mode;
- uint32_t max_rx_pkt_len; /**< Only used if JUMBO_FRAME enabled. */
+ uint32_t mtu; /**< Requested MTU. */
/** Maximum allowed size of LRO aggregated packet. */
uint32_t max_lro_pkt_size;
uint16_t split_hdr_size; /**< hdr buf size (header_split enabled).*/
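For applications migrating, a typical jumbo setup changes roughly as follows; the 18-byte overhead shown is the bare header-plus-CRC case, and drivers may report more:

    /* Before: the on-wire frame length was requested and the MTU was derived. */
    port_conf.rxmode.max_rx_pkt_len = 9018;
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

    /* After: the MTU is requested and the frame length is derived. */
    port_conf.rxmode.mtu = 9000;
    port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;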
rte_trace_point_emit_u16(nb_tx_q);
rte_trace_point_emit_u32(dev_conf->link_speeds);
rte_trace_point_emit_u32(dev_conf->rxmode.mq_mode);
- rte_trace_point_emit_u32(dev_conf->rxmode.max_rx_pkt_len);
+ rte_trace_point_emit_u32(dev_conf->rxmode.mtu);
rte_trace_point_emit_u64(dev_conf->rxmode.offloads);
rte_trace_point_emit_u32(dev_conf->txmode.mq_mode);
rte_trace_point_emit_u64(dev_conf->txmode.offloads);