]> git.droids-corp.org - dpdk.git/commitdiff
ethdev: fix max Rx packet length
authorFerruh Yigit <ferruh.yigit@intel.com>
Mon, 18 Oct 2021 13:48:48 +0000 (14:48 +0100)
committerFerruh Yigit <ferruh.yigit@intel.com>
Mon, 18 Oct 2021 17:20:20 +0000 (19:20 +0200)
There is a confusion on setting max Rx packet length, this patch aims to
clarify it.

'rte_eth_dev_configure()' API accepts max Rx packet size via
'uint32_t max_rx_pkt_len' field of the config struct 'struct
rte_eth_conf'.

Also the 'rte_eth_dev_set_mtu()' API can be used to set the MTU, and the
result is stored into '(struct rte_eth_dev)->data->mtu'.

These two APIs are related but they work in a disconnected way; they
store the set values in different variables, which makes it hard to
figure out which one to use. Also, having two different methods for a
related functionality is confusing for the users.

Other issues causing confusion are:
* maximum transmission unit (MTU) is payload of the Ethernet frame. And
  'max_rx_pkt_len' is the size of the Ethernet frame. Difference is
  Ethernet frame overhead, and this overhead may be different from
  device to device based on what device supports, like VLAN and QinQ.
* 'max_rx_pkt_len' is only valid when application requested jumbo frame,
  which adds additional confusion, and some APIs and PMDs already
  discard this documented behavior.
* For the jumbo frame enabled case, 'max_rx_pkt_len' is a mandatory
  field, which adds configuration complexity for the application.

As a solution, both APIs get the MTU as a parameter, and both save the
result in the same variable '(struct rte_eth_dev)->data->mtu'. For this,
'max_rx_pkt_len' is updated as 'mtu', and it is always valid,
independent of jumbo frame.

For 'rte_eth_dev_configure()', 'dev->data->dev_conf.rxmode.mtu' is the user
request and it should be used only within the configure function; the result
should be stored to '(struct rte_eth_dev)->data->mtu'. After that point
both the application and the PMD use the MTU from this variable.

When the application doesn't provide an MTU during 'rte_eth_dev_configure()',
the default 'RTE_ETHER_MTU' value is used.

Additional clarification is done on the scattered Rx configuration, in
relation to MTU and Rx buffer size.
MTU is used to configure the device for physical Rx/Tx size limitation,
Rx buffer is where to store Rx packets, many PMDs use mbuf data buffer
size as Rx buffer size.
PMDs compare MTU against Rx buffer size to decide enabling scattered Rx
or not. If scattered Rx is not supported by device, MTU bigger than Rx
buffer size should fail.

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
Acked-by: Ajit Khaparde <ajit.khaparde@broadcom.com>
Acked-by: Somnath Kotur <somnath.kotur@broadcom.com>
Acked-by: Huisong Li <lihuisong@huawei.com>
Acked-by: Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>
Acked-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
Acked-by: Rosen Xu <rosen.xu@intel.com>
Acked-by: Hyong Youb Kim <hyonkim@cisco.com>
121 files changed:
app/test-eventdev/test_perf_common.c
app/test-eventdev/test_pipeline_common.c
app/test-pmd/cmdline.c
app/test-pmd/config.c
app/test-pmd/parameters.c
app/test-pmd/testpmd.c
app/test-pmd/testpmd.h
app/test/test_link_bonding.c
app/test/test_link_bonding_mode4.c
app/test/test_link_bonding_rssconf.c
app/test/test_pmd_perf.c
doc/guides/nics/dpaa.rst
doc/guides/nics/dpaa2.rst
doc/guides/nics/features.rst
doc/guides/nics/fm10k.rst
doc/guides/nics/mlx5.rst
doc/guides/nics/octeontx.rst
doc/guides/nics/thunderx.rst
doc/guides/rel_notes/deprecation.rst
doc/guides/sample_app_ug/flow_classify.rst
doc/guides/sample_app_ug/l3_forward.rst
doc/guides/sample_app_ug/l3_forward_access_ctrl.rst
doc/guides/sample_app_ug/l3_forward_graph.rst
doc/guides/sample_app_ug/l3_forward_power_man.rst
doc/guides/sample_app_ug/performance_thread.rst
doc/guides/sample_app_ug/skeleton.rst
drivers/net/atlantic/atl_ethdev.c
drivers/net/avp/avp_ethdev.c
drivers/net/axgbe/axgbe_ethdev.c
drivers/net/bnx2x/bnx2x_ethdev.c
drivers/net/bnxt/bnxt_ethdev.c
drivers/net/bonding/rte_eth_bond_pmd.c
drivers/net/cnxk/cnxk_ethdev.c
drivers/net/cnxk/cnxk_ethdev_ops.c
drivers/net/cxgbe/cxgbe_ethdev.c
drivers/net/cxgbe/cxgbe_main.c
drivers/net/cxgbe/sge.c
drivers/net/dpaa/dpaa_ethdev.c
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/e1000/em_ethdev.c
drivers/net/e1000/igb_ethdev.c
drivers/net/e1000/igb_rxtx.c
drivers/net/ena/ena_ethdev.c
drivers/net/enetc/enetc_ethdev.c
drivers/net/enic/enic_ethdev.c
drivers/net/enic/enic_main.c
drivers/net/fm10k/fm10k_ethdev.c
drivers/net/hinic/hinic_pmd_ethdev.c
drivers/net/hns3/hns3_ethdev.c
drivers/net/hns3/hns3_ethdev_vf.c
drivers/net/hns3/hns3_rxtx.c
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/iavf/iavf_ethdev.c
drivers/net/ice/ice_dcf_ethdev.c
drivers/net/ice/ice_ethdev.c
drivers/net/ice/ice_rxtx.c
drivers/net/igc/igc_ethdev.c
drivers/net/igc/igc_ethdev.h
drivers/net/igc/igc_txrx.c
drivers/net/ionic/ionic_ethdev.c
drivers/net/ionic/ionic_rxtx.c
drivers/net/ipn3ke/ipn3ke_representor.c
drivers/net/ixgbe/ixgbe_ethdev.c
drivers/net/ixgbe/ixgbe_pf.c
drivers/net/ixgbe/ixgbe_rxtx.c
drivers/net/liquidio/lio_ethdev.c
drivers/net/mlx4/mlx4_rxq.c
drivers/net/mlx5/mlx5_rxq.c
drivers/net/mvneta/mvneta_ethdev.c
drivers/net/mvneta/mvneta_rxtx.c
drivers/net/mvpp2/mrvl_ethdev.c
drivers/net/nfp/nfp_common.c
drivers/net/octeontx/octeontx_ethdev.c
drivers/net/octeontx2/otx2_ethdev.c
drivers/net/octeontx2/otx2_ethdev_ops.c
drivers/net/pfe/pfe_ethdev.c
drivers/net/qede/qede_ethdev.c
drivers/net/qede/qede_rxtx.c
drivers/net/sfc/sfc_ethdev.c
drivers/net/sfc/sfc_port.c
drivers/net/tap/rte_eth_tap.c
drivers/net/thunderx/nicvf_ethdev.c
drivers/net/txgbe/txgbe_ethdev.c
drivers/net/txgbe/txgbe_ethdev.h
drivers/net/txgbe/txgbe_ethdev_vf.c
drivers/net/txgbe/txgbe_rxtx.c
drivers/net/virtio/virtio_ethdev.c
examples/bbdev_app/main.c
examples/bond/main.c
examples/distributor/main.c
examples/eventdev_pipeline/pipeline_worker_generic.c
examples/eventdev_pipeline/pipeline_worker_tx.c
examples/flow_classify/flow_classify.c
examples/ioat/ioatfwd.c
examples/ip_fragmentation/main.c
examples/ip_pipeline/link.c
examples/ip_reassembly/main.c
examples/ipsec-secgw/ipsec-secgw.c
examples/ipv4_multicast/main.c
examples/kni/main.c
examples/l2fwd-cat/l2fwd-cat.c
examples/l2fwd-crypto/main.c
examples/l2fwd-event/l2fwd_common.c
examples/l3fwd-acl/main.c
examples/l3fwd-graph/main.c
examples/l3fwd-power/main.c
examples/l3fwd/main.c
examples/performance-thread/l3fwd-thread/main.c
examples/performance-thread/l3fwd-thread/test.sh
examples/pipeline/obj.c
examples/ptpclient/ptpclient.c
examples/qos_meter/main.c
examples/qos_sched/init.c
examples/rxtx_callbacks/main.c
examples/skeleton/basicfwd.c
examples/vhost/main.c
examples/vm_power_manager/main.c
lib/ethdev/rte_ethdev.c
lib/ethdev/rte_ethdev.h
lib/ethdev/rte_ethdev_trace.h

index cc100650c21e4a9e814b179564c3a41849b215e3..660d5a0364b64727b84be2ef8d1145a58c60b911 100644 (file)
@@ -669,7 +669,6 @@ perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
        struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                        .split_hdr_size = 0,
                },
                .rx_adv_conf = {
index 6ee530d4cdc96c8df2007fd6450e8673e1255b0c..5fcea74b4d439909a7edf603ab1088dcf3aa1b92 100644 (file)
@@ -197,8 +197,9 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
                return -EINVAL;
        }
 
-       port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
-       if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
+       port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+               RTE_ETHER_CRC_LEN;
+       if (port_conf.rxmode.mtu > RTE_ETHER_MTU)
                port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        t->internal_port = 1;
index b8f06063d2250182ab268de96fb27f2b6a9aee85..f777cc453836a103f65049d573f45bd6f521f350 100644 (file)
@@ -1880,45 +1880,38 @@ cmd_config_max_pkt_len_parsed(void *parsed_result,
                                __rte_unused void *data)
 {
        struct cmd_config_max_pkt_len_result *res = parsed_result;
-       uint32_t max_rx_pkt_len_backup = 0;
-       portid_t pid;
+       portid_t port_id;
        int ret;
 
+       if (strcmp(res->name, "max-pkt-len") != 0) {
+               printf("Unknown parameter\n");
+               return;
+       }
+
        if (!all_ports_stopped()) {
                fprintf(stderr, "Please stop all ports first\n");
                return;
        }
 
-       RTE_ETH_FOREACH_DEV(pid) {
-               struct rte_port *port = &ports[pid];
+       RTE_ETH_FOREACH_DEV(port_id) {
+               struct rte_port *port = &ports[port_id];
 
-               if (!strcmp(res->name, "max-pkt-len")) {
-                       if (res->value < RTE_ETHER_MIN_LEN) {
-                               fprintf(stderr,
-                                       "max-pkt-len can not be less than %d\n",
-                                       RTE_ETHER_MIN_LEN);
-                               return;
-                       }
-                       if (res->value == port->dev_conf.rxmode.max_rx_pkt_len)
-                               return;
-
-                       ret = eth_dev_info_get_print_err(pid, &port->dev_info);
-                       if (ret != 0) {
-                               fprintf(stderr,
-                                       "rte_eth_dev_info_get() failed for port %u\n",
-                                       pid);
-                               return;
-                       }
-
-                       max_rx_pkt_len_backup = port->dev_conf.rxmode.max_rx_pkt_len;
+               if (res->value < RTE_ETHER_MIN_LEN) {
+                       fprintf(stderr,
+                               "max-pkt-len can not be less than %d\n",
+                               RTE_ETHER_MIN_LEN);
+                       return;
+               }
 
-                       port->dev_conf.rxmode.max_rx_pkt_len = res->value;
-                       if (update_jumbo_frame_offload(pid) != 0)
-                               port->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len_backup;
-               } else {
-                       fprintf(stderr, "Unknown parameter\n");
+               ret = eth_dev_info_get_print_err(port_id, &port->dev_info);
+               if (ret != 0) {
+                       fprintf(stderr,
+                               "rte_eth_dev_info_get() failed for port %u\n",
+                               port_id);
                        return;
                }
+
+               update_jumbo_frame_offload(port_id, res->value);
        }
 
        init_port_config();
index f83a1abb09cf6afb3cb30795c1cb34707c9a855c..333d3dd6225914b8d15ef387076fb6989108dd79 100644 (file)
@@ -1209,7 +1209,6 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
        int diag;
        struct rte_port *rte_port = &ports[port_id];
        struct rte_eth_dev_info dev_info;
-       uint16_t eth_overhead;
        int ret;
 
        if (port_id_is_invalid(port_id, ENABLED_WARN))
@@ -1226,21 +1225,18 @@ port_mtu_set(portid_t port_id, uint16_t mtu)
                return;
        }
        diag = rte_eth_dev_set_mtu(port_id, mtu);
-       if (diag)
+       if (diag != 0) {
                fprintf(stderr, "Set MTU failed. diag=%d\n", diag);
-       else if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               /*
-                * Ether overhead in driver is equal to the difference of
-                * max_rx_pktlen and max_mtu in rte_eth_dev_info when the
-                * device supports jumbo frame.
-                */
-               eth_overhead = dev_info.max_rx_pktlen - dev_info.max_mtu;
-               if (mtu > RTE_ETHER_MTU) {
+               return;
+       }
+
+       rte_port->dev_conf.rxmode.mtu = mtu;
+
+       if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+               if (mtu > RTE_ETHER_MTU)
                        rte_port->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
-                       rte_port->dev_conf.rxmode.max_rx_pkt_len =
-                                               mtu + eth_overhead;
-               } else
+               else
                        rte_port->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
        }
index 2e677236306286daa3ab07c868d313778d5cb10c..779a721fa0581e0735facdb053bd444f2f0b2a36 100644 (file)
@@ -951,7 +951,7 @@ launch_args_parse(int argc, char** argv)
                        if (!strcmp(lgopts[opt_idx].name, "max-pkt-len")) {
                                n = atoi(optarg);
                                if (n >= RTE_ETHER_MIN_LEN)
-                                       rx_mode.max_rx_pkt_len = (uint32_t) n;
+                                       max_rx_pkt_len = n;
                                else
                                        rte_exit(EXIT_FAILURE,
                                                 "Invalid max-pkt-len=%d - should be > %d\n",
index 909b6571dc2613fbe72273e88a97fcee7d319fe4..50d0ec4fe3db775ae6ae4465e4c884948c43fd69 100644 (file)
@@ -219,6 +219,11 @@ unsigned int xstats_display_num; /**< Size of extended statistics to show */
  */
 uint8_t f_quit;
 
+/*
+ * Max Rx frame size, set by '--max-pkt-len' parameter.
+ */
+uint32_t max_rx_pkt_len;
+
 /*
  * Configuration of packet segments used to scatter received packets
  * if some of split features is configured.
@@ -451,13 +456,7 @@ lcoreid_t latencystats_lcore_id = -1;
 /*
  * Ethernet device configuration.
  */
-struct rte_eth_rxmode rx_mode = {
-       /* Default maximum frame length.
-        * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
-        * in init_config().
-        */
-       .max_rx_pkt_len = 0,
-};
+struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
        .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
@@ -1542,11 +1541,24 @@ check_nb_hairpinq(queueid_t hairpinq)
        return 0;
 }
 
+static int
+get_eth_overhead(struct rte_eth_dev_info *dev_info)
+{
+       uint32_t eth_overhead;
+
+       if (dev_info->max_mtu != UINT16_MAX &&
+           dev_info->max_rx_pktlen > dev_info->max_mtu)
+               eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
+       else
+               eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return eth_overhead;
+}
+
 static void
 init_config_port_offloads(portid_t pid, uint32_t socket_id)
 {
        struct rte_port *port = &ports[pid];
-       uint16_t data_size;
        int ret;
        int i;
 
@@ -1560,7 +1572,7 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
        if (ret != 0)
                rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");
 
-       ret = update_jumbo_frame_offload(pid);
+       ret = update_jumbo_frame_offload(pid, 0);
        if (ret != 0)
                fprintf(stderr,
                        "Updating jumbo frame offload failed for port %u\n",
@@ -1580,6 +1592,10 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
        if (eth_link_speed)
                port->dev_conf.link_speeds = eth_link_speed;
 
+       if (max_rx_pkt_len)
+               port->dev_conf.rxmode.mtu = max_rx_pkt_len -
+                       get_eth_overhead(&port->dev_info);
+
        /* set flag to initialize port/queue */
        port->need_reconfig = 1;
        port->need_reconfig_queues = 1;
@@ -1592,14 +1608,20 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id)
         */
        if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
            port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
-               data_size = rx_mode.max_rx_pkt_len /
-                       port->dev_info.rx_desc_lim.nb_mtu_seg_max;
-
-               if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) {
-                       mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM;
-                       TESTPMD_LOG(WARNING,
-                                   "Configured mbuf size of the first segment %hu\n",
-                                   mbuf_data_size[0]);
+               uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
+               uint16_t mtu;
+
+               if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
+                       uint16_t data_size = (mtu + eth_overhead) /
+                               port->dev_info.rx_desc_lim.nb_mtu_seg_max;
+                       uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;
+
+                       if (buffer_size > mbuf_data_size[0]) {
+                               mbuf_data_size[0] = buffer_size;
+                               TESTPMD_LOG(WARNING,
+                                       "Configured mbuf size of the first segment %hu\n",
+                                       mbuf_data_size[0]);
+                       }
                }
        }
 }
@@ -2735,6 +2757,7 @@ start_port(portid_t pid)
                                        pi);
                                return -1;
                        }
+
                        /* configure port */
                        diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq,
                                                     nb_txq + nb_hairpinq,
@@ -3669,44 +3692,45 @@ rxtx_port_config(struct rte_port *port)
 
 /*
  * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload,
- * MTU is also aligned if JUMBO_FRAME offload is not set.
+ * MTU is also aligned.
  *
  * port->dev_info should be set before calling this function.
  *
+ * if 'max_rx_pktlen' is zero, it is set to current device value, "MTU +
+ * ETH_OVERHEAD". This is useful to update flags but not MTU value.
+ *
  * return 0 on success, negative on error
  */
 int
-update_jumbo_frame_offload(portid_t portid)
+update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen)
 {
        struct rte_port *port = &ports[portid];
        uint32_t eth_overhead;
        uint64_t rx_offloads;
-       int ret;
+       uint16_t mtu, new_mtu;
        bool on;
 
-       /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */
-       if (port->dev_info.max_mtu != UINT16_MAX &&
-           port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
-               eth_overhead = port->dev_info.max_rx_pktlen -
-                               port->dev_info.max_mtu;
-       else
-               eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       eth_overhead = get_eth_overhead(&port->dev_info);
 
-       rx_offloads = port->dev_conf.rxmode.offloads;
+       if (rte_eth_dev_get_mtu(portid, &mtu) != 0) {
+               printf("Failed to get MTU for port %u\n", portid);
+               return -1;
+       }
 
-       /* Default config value is 0 to use PMD specific overhead */
-       if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
-               port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
+       if (max_rx_pktlen == 0)
+               max_rx_pktlen = mtu + eth_overhead;
+
+       rx_offloads = port->dev_conf.rxmode.offloads;
+       new_mtu = max_rx_pktlen - eth_overhead;
 
-       if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
+       if (new_mtu <= RTE_ETHER_MTU) {
                rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                on = false;
        } else {
                if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
                        fprintf(stderr,
                                "Frame size (%u) is not supported by port %u\n",
-                               port->dev_conf.rxmode.max_rx_pkt_len,
-                               portid);
+                               max_rx_pktlen, portid);
                        return -1;
                }
                rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -3727,19 +3751,18 @@ update_jumbo_frame_offload(portid_t portid)
                }
        }
 
-       /* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
-        * if unset do it here
-        */
-       if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
-               ret = eth_dev_set_mtu_mp(portid,
-                               port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
-               if (ret)
-                       fprintf(stderr,
-                               "Failed to set MTU to %u for port %u\n",
-                               port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
-                               portid);
+       if (mtu == new_mtu)
+               return 0;
+
+       if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) {
+               fprintf(stderr,
+                       "Failed to set MTU to %u for port %u\n",
+                       new_mtu, portid);
+               return -1;
        }
 
+       port->dev_conf.rxmode.mtu = new_mtu;
+
        return 0;
 }
 
index 39f464f1ee167d32c14b3813f721d0ea75e66bc6..42a597596fddec47f051b6945c5dd38b7d20a80b 100644 (file)
@@ -467,6 +467,8 @@ extern uint8_t bitrate_enabled;
 
 extern struct rte_fdir_conf fdir_conf;
 
+extern uint32_t max_rx_pkt_len;
+
 /*
  * Configuration of packet segments used to scatter received packets
  * if some of split features is configured.
@@ -1043,7 +1045,7 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
                         __rte_unused void *user_param);
 void add_tx_dynf_callback(portid_t portid);
 void remove_tx_dynf_callback(portid_t portid);
-int update_jumbo_frame_offload(portid_t portid);
+int update_jumbo_frame_offload(portid_t portid, uint32_t max_rx_pktlen);
 
 /*
  * Work-around of a compilation error with ICC on invocations of the
index 8a5c8310a8b4a3302f6e0db7b4f1555db70a7144..5388d18125a66914195a2b66c7a4747a53d478cb 100644 (file)
@@ -136,7 +136,6 @@ static struct rte_eth_conf default_pmd_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
                .split_hdr_size = 0,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
index f120b2e3be24c487812c9c15c4aacc20fd6cb70f..189d2430f27e62c08762129ae2fbe6b3633a6d45 100644 (file)
@@ -108,7 +108,6 @@ static struct link_bonding_unittest_params test_params  = {
 static struct rte_eth_conf default_pmd_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index 5dac60ca1edd64f2d4988c06ba6c30830fc9c03b..e7bb0497b66369582cfadce37cb3d054d97c647f 100644 (file)
@@ -81,7 +81,6 @@ static struct link_bonding_rssconf_unittest_params test_params  = {
 static struct rte_eth_conf default_pmd_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
@@ -93,7 +92,6 @@ static struct rte_eth_conf default_pmd_conf = {
 static struct rte_eth_conf rss_pmd_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index 3a248d512c4a33c11c16b49f553c5b5eb1330c1c..a3b4f52c65e6ac71e0ba6ac9384d0f3d07868605 100644 (file)
@@ -63,7 +63,6 @@ static struct rte_ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index 7355ec3059166e3ba6a86751c7a53394ae94f561..9dad612058c6bba490a02cea114ec13796d7d141 100644 (file)
@@ -335,7 +335,7 @@ Maximum packet length
 ~~~~~~~~~~~~~~~~~~~~~
 
 The DPAA SoC family support a maximum of a 10240 jumbo frame. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
 member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
 up to 10240 bytes can still reach the host interface.
 
index df23a5704dcaac826c4b6da883ec4f56a5f38dbe..831bc564883a694ec8591bfa962fa48a89ff6a69 100644 (file)
@@ -545,7 +545,7 @@ Maximum packet length
 ~~~~~~~~~~~~~~~~~~~~~
 
 The DPAA2 SoC family support a maximum of a 10240 jumbo frame. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
 member of ``struct rte_eth_conf`` is set to a value lower than 10240, frames
 up to 10240 bytes can still reach the host interface.
 
index f883f11a8b1960421890adb0a4fcb9f48654787a..79bce2784195ab359c86616dc28d74c669a54d39 100644 (file)
@@ -166,7 +166,7 @@ Jumbo frame
 Supports Rx jumbo frames.
 
 * **[uses]    rte_eth_rxconf,rte_eth_rxmode**: ``offloads:DEV_RX_OFFLOAD_JUMBO_FRAME``.
-  ``dev_conf.rxmode.max_rx_pkt_len``.
+  ``dev_conf.rxmode.mtu``.
 * **[related] rte_eth_dev_info**: ``max_rx_pktlen``.
 * **[related] API**: ``rte_eth_dev_set_mtu()``.
 
index 7b8ef0e7823da825099d2d9944b0ec9d131406ba..ed6afd62703d5fd4b29d6ec68c67c1a7466e27f8 100644 (file)
@@ -141,7 +141,7 @@ Maximum packet length
 ~~~~~~~~~~~~~~~~~~~~~
 
 The FM10000 family of NICS support a maximum of a 15K jumbo frame. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
 member of ``struct rte_eth_conf`` is set to a value lower than 15364, frames
 up to 15364 bytes can still reach the host interface.
 
index aabab6a9366713f8ff2ac11eb6063edb83367589..7b540504f9c2d819aebadcc2e17e9a0dbff3da33 100644 (file)
@@ -606,9 +606,9 @@ Driver options
   and each stride receives one packet. MPRQ can improve throughput for
   small-packet traffic.
 
-  When MPRQ is enabled, max_rx_pkt_len can be larger than the size of
+  When MPRQ is enabled, MTU can be larger than the size of
   user-provided mbuf even if DEV_RX_OFFLOAD_SCATTER isn't enabled. PMD will
-  configure large stride size enough to accommodate max_rx_pkt_len as long as
+  configure large stride size enough to accommodate MTU as long as
   device allows. Note that this can waste system memory compared to enabling Rx
   scatter and multi-segment packet.
 
index b1a868b054d13e20cd54ed7bc47575f590d273e1..8236cc3e93e043a4b59910b7ef340d28febfaccf 100644 (file)
@@ -157,7 +157,7 @@ Maximum packet length
 ~~~~~~~~~~~~~~~~~~~~~
 
 The OCTEON TX SoC family NICs support a maximum of a 32K jumbo frame. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
 member of ``struct rte_eth_conf`` is set to a value lower than 32k, frames
 up to 32k bytes can still reach the host interface.
 
index 12d43ce93e28e0b8e0c294086c40ec8ff7943b1b..98f23a2b2a3d51ade2d0aa300608e88156bb65f7 100644 (file)
@@ -392,7 +392,7 @@ Maximum packet length
 ~~~~~~~~~~~~~~~~~~~~~
 
 The ThunderX SoC family NICs support a maximum of a 9K jumbo frame. The value
-is fixed and cannot be changed. So, even when the ``rxmode.max_rx_pkt_len``
+is fixed and cannot be changed. So, even when the ``rxmode.mtu``
 member of ``struct rte_eth_conf`` is set to a value lower than 9200, frames
 up to 9200 bytes can still reach the host interface.
 
index 40c419693e5ef1618edab3736003944786e1ed55..cc2b89850b07743a43acc773fb41d10dd43c3673 100644 (file)
@@ -92,31 +92,6 @@ Deprecation Notices
   In 19.11 PMDs will still update the field even when the offload is not
   enabled.
 
-* ethdev: ``uint32_t max_rx_pkt_len`` field of ``struct rte_eth_rxmode``, will be
-  replaced by a new ``uint32_t mtu`` field of ``struct rte_eth_conf`` in v21.11.
-  The new ``mtu`` field will be used to configure the initial device MTU via
-  ``rte_eth_dev_configure()`` API.
-  Later MTU can be changed by ``rte_eth_dev_set_mtu()`` API as done now.
-  The existing ``(struct rte_eth_dev)->data->mtu`` variable will be used to store
-  the configured ``mtu`` value,
-  and this new ``(struct rte_eth_dev)->data->dev_conf.mtu`` variable will
-  be used to store the user configuration request.
-  Unlike ``max_rx_pkt_len``, which was valid only when ``JUMBO_FRAME`` enabled,
-  ``mtu`` field will be always valid.
-  When ``mtu`` config is not provided by the application, default ``RTE_ETHER_MTU``
-  value will be used.
-  ``(struct rte_eth_dev)->data->mtu`` should be updated after MTU set successfully,
-  either by ``rte_eth_dev_configure()`` or ``rte_eth_dev_set_mtu()``.
-
-  An application may need to configure device for a specific Rx packet size, like for
-  cases ``DEV_RX_OFFLOAD_SCATTER`` is not supported and device received packet size
-  can't be bigger than Rx buffer size.
-  To cover these cases an application needs to know the device packet overhead to be
-  able to calculate the ``mtu`` corresponding to a Rx buffer size, for this
-  ``(struct rte_eth_dev_info).max_rx_pktlen`` will be kept,
-  the device packet overhead can be calculated as:
-  ``(struct rte_eth_dev_info).max_rx_pktlen - (struct rte_eth_dev_info).max_mtu``
-
 * ethdev: Announce moving from dedicated modify function for each field,
   to using the general ``rte_flow_modify_field`` action.
 
index 812aaa87b05ba48d46dac22818edfe9f3d7da686..6c4c04e935e49405336894d91138a257ec674a3e 100644 (file)
@@ -162,12 +162,7 @@ Forwarding application is shown below:
     :end-before: >8 End of initializing a given port.
 
 The Ethernet ports are configured with default settings using the
-``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct.
-
-.. literalinclude:: ../../../examples/flow_classify/flow_classify.c
-    :language: c
-    :start-after: Ethernet ports configured with default settings using struct. 8<
-    :end-before: >8 End of configuration of Ethernet ports.
+``rte_eth_dev_configure()`` function.
 
 For this example the ports are set up with 1 RX and 1 TX queue using the
 ``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
index 2d5cd5f1c0bab66e4ed00018f656d0939795848b..56af5cd5b383fe10651c8aa322a9de0aeb8162fb 100644 (file)
@@ -65,7 +65,7 @@ The application has a number of command line options::
                              [--lookup LOOKUP_METHOD]
                              --config(port,queue,lcore)[,(port,queue,lcore)]
                              [--eth-dest=X,MM:MM:MM:MM:MM:MM]
-                             [--enable-jumbo [--max-pkt-len PKTLEN]]
+                             [--max-pkt-len PKTLEN]
                              [--no-numa]
                              [--hash-entry-num]
                              [--ipv6]
@@ -95,9 +95,7 @@ Where,
 
 * ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
 
-* ``--enable-jumbo:`` Optional, enables jumbo frames.
-
-* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
+* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
 
 * ``--no-numa:`` Optional, disables numa awareness.
 
index 2cf6e4556f14a522ebc979de5f60440317155c9c..486247ac2e4f7727a497c39a8bfebcd53babb9eb 100644 (file)
@@ -236,7 +236,7 @@ The application has a number of command line options:
 
 ..  code-block:: console
 
-    ./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
+    ./<build_dir>/examples/dpdk-l3fwd-acl [EAL options] -- -p PORTMASK [-P] --config(port,queue,lcore)[,(port,queue,lcore)] --rule_ipv4 FILENAME --rule_ipv6 FILENAME [--alg=<val>] [--max-pkt-len PKTLEN] [--no-numa] [--eth-dest=X,MM:MM:MM:MM:MM:MM]
 
 
 where,
@@ -255,8 +255,6 @@ where,
 *   --alg=<val>: optional, ACL classify method to use, one of:
     ``scalar|sse|avx2|neon|altivec|avx512x16|avx512x32``
 
-*   --enable-jumbo: optional, enables jumbo frames
-
 *   --max-pkt-len: optional, maximum packet length in decimal (64-9600)
 
 *   --no-numa: optional, disables numa awareness
index 03e9a85aa68c36e06e2a20c1538876cb53ab9934..0a3e0d44ecea98fe5bed8f582b36b4d2701aa2e5 100644 (file)
@@ -48,7 +48,7 @@ The application has a number of command line options similar to l3fwd::
                                    [-P]
                                    --config(port,queue,lcore)[,(port,queue,lcore)]
                                    [--eth-dest=X,MM:MM:MM:MM:MM:MM]
-                                   [--enable-jumbo [--max-pkt-len PKTLEN]]
+                                   [--max-pkt-len PKTLEN]
                                    [--no-numa]
                                    [--per-port-pool]
 
@@ -63,9 +63,7 @@ Where,
 
 * ``--eth-dest=X,MM:MM:MM:MM:MM:MM:`` Optional, ethernet destination for port X.
 
-* ``--enable-jumbo:`` Optional, enables jumbo frames.
-
-* ``--max-pkt-len:`` Optional, under the premise of enabling jumbo, maximum packet length in decimal (64-9600).
+* ``--max-pkt-len:`` Optional, maximum packet length in decimal (64-9600).
 
 * ``--no-numa:`` Optional, disables numa awareness.
 
index 0495314c87d536dd33a6e873b64d0805a7499294..8817eaadbfc3bee170b3309c03694bfb3ceeef45 100644 (file)
@@ -88,7 +88,7 @@ The application has a number of command line options:
 
 .. code-block:: console
 
-    ./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P]  --config(port,queue,lcore)[,(port,queue,lcore)] [--enable-jumbo [--max-pkt-len PKTLEN]] [--no-numa]
+    ./<build_dir>/examples/dpdk-l3fwd_power [EAL options] -- -p PORTMASK [-P]  --config(port,queue,lcore)[,(port,queue,lcore)] [--max-pkt-len PKTLEN] [--no-numa]
 
 where,
 
@@ -99,8 +99,6 @@ where,
 
 *   --config (port,queue,lcore)[,(port,queue,lcore)]: determines which queues from which ports are mapped to which cores.
 
-*   --enable-jumbo: optional, enables jumbo frames
-
 *   --max-pkt-len: optional, maximum packet length in decimal (64-9600)
 
 *   --no-numa: optional, disables numa awareness
index 9b09838f64489cb43532cd0b306729cf5e4d76c1..7d1bf6eaae8c6f3bbb0ee2096e74d7da3ae0d203 100644 (file)
@@ -59,7 +59,7 @@ The application has a number of command line options::
         -p PORTMASK [-P]
         --rx(port,queue,lcore,thread)[,(port,queue,lcore,thread)]
         --tx(lcore,thread)[,(lcore,thread)]
-        [--enable-jumbo] [--max-pkt-len PKTLEN]]  [--no-numa]
+        [--max-pkt-len PKTLEN]  [--no-numa]
         [--hash-entry-num] [--ipv6] [--no-lthreads] [--stat-lcore lcore]
         [--parse-ptype]
 
@@ -80,8 +80,6 @@ Where:
   the lcore the thread runs on, and the id of RX thread with which it is
   associated. The parameters are explained below.
 
-* ``--enable-jumbo``: optional, enables jumbo frames.
-
 * ``--max-pkt-len``: optional, maximum packet length in decimal (64-9600).
 
 * ``--no-numa``: optional, disables numa awareness.
index f7bcd7ed2a1de29fdfe0399411089f8a6146348c..6d0de64401055bbfe50f564121c6f01839fd359d 100644 (file)
@@ -106,12 +106,7 @@ Forwarding application is shown below:
     :end-before: >8 End of main functional part of port initialization.
 
 The Ethernet ports are configured with default settings using the
-``rte_eth_dev_configure()`` function and the ``port_conf_default`` struct:
-
-.. literalinclude:: ../../../examples/skeleton/basicfwd.c
-        :language: c
-        :start-after: Configuration of ethernet ports. 8<
-        :end-before: >8 End of configuration of ethernet ports.
+``rte_eth_dev_configure()`` function.
 
 For this example the ports are set up with 1 RX and 1 TX queue using the
 ``rte_eth_rx_queue_setup()`` and ``rte_eth_tx_queue_setup()`` functions.
index 0ce35eb519e21ce0daf753509cba256841f32cda..3f654c0715665a5a33f475d51a324020db78c209 100644 (file)
@@ -1636,9 +1636,6 @@ atl_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen)
                return -EINVAL;
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        return 0;
 }
 
index 6cb8bb4338deb11567a7103f34d3e23bccdfb12f..932ec90265cf63716677e4fe55315af46e2a2edd 100644 (file)
@@ -1059,17 +1059,18 @@ static int
 avp_dev_enable_scattered(struct rte_eth_dev *eth_dev,
                         struct avp_dev *avp)
 {
-       unsigned int max_rx_pkt_len;
+       unsigned int max_rx_pktlen;
 
-       max_rx_pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       max_rx_pktlen = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
 
-       if ((max_rx_pkt_len > avp->guest_mbuf_size) ||
-           (max_rx_pkt_len > avp->host_mbuf_size)) {
+       if (max_rx_pktlen > avp->guest_mbuf_size ||
+           max_rx_pktlen > avp->host_mbuf_size) {
                /*
                 * If the guest MTU is greater than either the host or guest
                 * buffers then chained mbufs have to be enabled in the TX
                 * direction.  It is assumed that the application will not need
-                * to send packets larger than their max_rx_pkt_len (MRU).
+                * to send packets larger than their MTU.
                 */
                return 1;
        }
@@ -1124,7 +1125,7 @@ avp_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
        PMD_DRV_LOG(DEBUG, "AVP max_rx_pkt_len=(%u,%u) mbuf_size=(%u,%u)\n",
                    avp->max_rx_pkt_len,
-                   eth_dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                   eth_dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN,
                    avp->host_mbuf_size,
                    avp->guest_mbuf_size);
 
@@ -1889,8 +1890,8 @@ avp_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * function; send it truncated to avoid the performance
                         * hit of having to manage returning the already
                         * allocated buffer to the free list.  This should not
-                        * happen since the application should have set the
-                        * max_rx_pkt_len based on its MTU and it should be
+                        * happen since the application should not have sent
+                        * packets larger than its MTU and it should be
                         * policing its own packet sizes.
                         */
                        txq->errors++;
index ebd5411fddf3d5d380f29d5f31078533d9e53f35..76cd892eec7b74836996c37acb7b5afa404c8e30 100644 (file)
@@ -350,7 +350,7 @@ axgbe_dev_start(struct rte_eth_dev *dev)
        struct axgbe_port *pdata = dev->data->dev_private;
        int ret;
        struct rte_eth_dev_data *dev_data = dev->data;
-       uint16_t max_pkt_len = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+       uint16_t max_pkt_len;
 
        dev->dev_ops = &axgbe_eth_dev_ops;
 
@@ -383,6 +383,8 @@ axgbe_dev_start(struct rte_eth_dev *dev)
 
        rte_bit_relaxed_clear32(AXGBE_STOPPED, &pdata->dev_state);
        rte_bit_relaxed_clear32(AXGBE_DOWN, &pdata->dev_state);
+
+       max_pkt_len = dev_data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
                                max_pkt_len > pdata->rx_buf_size)
                dev_data->scattered_rx = 1;
@@ -1490,7 +1492,7 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                                dev->data->port_id);
                return -EBUSY;
        }
-       if (frame_size > AXGBE_ETH_MAX_LEN) {
+       if (mtu > RTE_ETHER_MTU) {
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                val = 1;
@@ -1500,7 +1502,6 @@ static int axgb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                val = 0;
        }
        AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        return 0;
 }
 
index 8b0806016ff0079121774408f421d90147b053bc..aff53fedb980df3146d1b06bd5074d6ab56a5077 100644 (file)
@@ -175,16 +175,12 @@ static int
 bnx2x_dev_configure(struct rte_eth_dev *dev)
 {
        struct bnx2x_softc *sc = dev->data->dev_private;
-       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
        int mp_ncpus = sysconf(_SC_NPROCESSORS_CONF);
 
        PMD_INIT_FUNC_TRACE(sc);
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               sc->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-               dev->data->mtu = sc->mtu;
-       }
+       sc->mtu = dev->data->dev_conf.rxmode.mtu;
 
        if (dev->data->nb_tx_queues > dev->data->nb_rx_queues) {
                PMD_DRV_LOG(ERR, sc, "The number of TX queues is greater than number of RX queues");
index ebda74d02f3aa98df0290a9f661b5aacb499eb68..890197d340376818796d4d91499820de74454284 100644 (file)
@@ -1161,13 +1161,8 @@ static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev)
                rx_offloads |= DEV_RX_OFFLOAD_RSS_HASH;
        eth_dev->data->dev_conf.rxmode.offloads = rx_offloads;
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               eth_dev->data->mtu =
-                       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                       RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE *
-                       BNXT_NUM_VLANS;
-               bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
-       }
+       bnxt_mtu_set_op(eth_dev, eth_dev->data->mtu);
+
        return 0;
 
 resource_error:
@@ -1205,6 +1200,7 @@ void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
  */
 static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 {
+       uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
        uint16_t buf_size;
        int i;
 
@@ -1219,7 +1215,7 @@ static int bnxt_scattered_rx(struct rte_eth_dev *eth_dev)
 
                buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
                                      RTE_PKTMBUF_HEADROOM);
-               if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size)
+               if (eth_dev->data->mtu + overhead > buf_size)
                        return 1;
        }
        return 0;
@@ -3030,6 +3026,7 @@ bnxt_tx_burst_mode_get(struct rte_eth_dev *dev, __rte_unused uint16_t queue_id,
 
 int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
 {
+       uint32_t overhead = BNXT_MAX_PKT_LEN - BNXT_MAX_MTU;
        struct bnxt *bp = eth_dev->data->dev_private;
        uint32_t new_pkt_size;
        uint32_t rc = 0;
@@ -3043,8 +3040,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
        if (!eth_dev->data->nb_rx_queues)
                return rc;
 
-       new_pkt_size = new_mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
-                      VLAN_TAG_SIZE * BNXT_NUM_VLANS;
+       new_pkt_size = new_mtu + overhead;
 
        /*
         * Disallow any MTU change that would require scattered receive support
@@ -3071,7 +3067,7 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
        }
 
        /* Is there a change in mtu setting? */
-       if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len == new_pkt_size)
+       if (eth_dev->data->mtu == new_mtu)
                return rc;
 
        for (i = 0; i < bp->nr_vnics; i++) {
@@ -3093,9 +3089,6 @@ int bnxt_mtu_set_op(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
                }
        }
 
-       if (!rc)
-               eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_pkt_size;
-
        if (bnxt_hwrm_config_host_mtu(bp))
                PMD_DRV_LOG(WARNING, "Failed to configure host MTU\n");
 
index 0ca34c604ba812045f7d8e00984169db40914178..6d8b3c245a8414086b0b7b85d410a44e9932cdd8 100644 (file)
@@ -1721,8 +1721,8 @@ slave_configure(struct rte_eth_dev *bonded_eth_dev,
                slave_eth_dev->data->dev_conf.rxmode.offloads &=
                                ~DEV_RX_OFFLOAD_VLAN_FILTER;
 
-       slave_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
-                       bonded_eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       slave_eth_dev->data->dev_conf.rxmode.mtu =
+                       bonded_eth_dev->data->dev_conf.rxmode.mtu;
 
        if (bonded_eth_dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_JUMBO_FRAME)
index 966bd23c7f986abb2bb0f72e5c7b7ad1b5c2a370..c94fc505fef1a28f9c33e552be374412d5305403 100644 (file)
@@ -209,7 +209,7 @@ nix_enable_mseg_on_jumbo(struct cnxk_eth_rxq_sp *rxq)
        mbp_priv = rte_mempool_get_priv(rxq->qconf.mp);
        buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
-       if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+       if (eth_dev->data->mtu + (uint32_t)CNXK_NIX_L2_OVERHEAD > buffsz) {
                dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
                dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
        }
@@ -220,18 +220,13 @@ nix_recalc_mtu(struct rte_eth_dev *eth_dev)
 {
        struct rte_eth_dev_data *data = eth_dev->data;
        struct cnxk_eth_rxq_sp *rxq;
-       uint16_t mtu;
        int rc;
 
        rxq = ((struct cnxk_eth_rxq_sp *)data->rx_queues[0]) - 1;
        /* Setup scatter mode if needed by jumbo */
        nix_enable_mseg_on_jumbo(rxq);
 
-       /* Setup MTU based on max_rx_pkt_len */
-       mtu = data->dev_conf.rxmode.max_rx_pkt_len - CNXK_NIX_L2_OVERHEAD +
-                               CNXK_NIX_MAX_VTAG_ACT_SIZE;
-
-       rc = cnxk_nix_mtu_set(eth_dev, mtu);
+       rc = cnxk_nix_mtu_set(eth_dev, data->mtu);
        if (rc)
                plt_err("Failed to set default MTU size, rc=%d", rc);
 
index b6cc5286c6d0a45a7547653551681931b628a3af..695d0d6fd3e2e7f38b32f1a919a5f048136b77ac 100644 (file)
@@ -440,16 +440,10 @@ cnxk_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
                goto exit;
        }
 
-       frame_size += RTE_ETHER_CRC_LEN;
-
-       if (frame_size > RTE_ETHER_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       /* Update max_rx_pkt_len */
-       data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
 exit:
        return rc;
 }
index cd9aa9f84b63c998dc6f45a5a254ec4b5a463816..458111ae5b160bca966906f47848346ee04fee0c 100644 (file)
@@ -310,11 +310,11 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
                return err;
 
        /* Must accommodate at least RTE_ETHER_MIN_MTU */
-       if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
+       if (mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
                return -EINVAL;
 
        /* set to jumbo mode if needed */
-       if (new_mtu > CXGBE_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
@@ -323,9 +323,6 @@ int cxgbe_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 
        err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
                            -1, -1, true);
-       if (!err)
-               eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
-
        return err;
 }
 
@@ -623,7 +620,8 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                             const struct rte_eth_rxconf *rx_conf __rte_unused,
                             struct rte_mempool *mp)
 {
-       unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
        struct port_info *pi = eth_dev->data->dev_private;
        struct adapter *adapter = pi->adapter;
        struct rte_eth_dev_info dev_info;
@@ -682,7 +680,7 @@ int cxgbe_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        rxq->fl.size = temp_nb_desc;
 
        /* Set to jumbo mode if necessary */
-       if (pkt_len > CXGBE_ETH_MAX_LEN)
+       if (eth_dev->data->mtu > RTE_ETHER_MTU)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
index 6dd1bf1f836eaf2fd0452240ff1cef92d3b50b92..91d6bb9bbcb0e274316aa3079a34adc8992c0527 100644 (file)
@@ -1661,8 +1661,7 @@ int cxgbe_link_start(struct port_info *pi)
        unsigned int mtu;
        int ret;
 
-       mtu = pi->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-             (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN);
+       mtu = pi->eth_dev->data->mtu;
 
        conf_offloads = pi->eth_dev->data->dev_conf.rxmode.offloads;
 
index e5f7721dc4b3bc15f68a75153f441d2beac1d2dc..830f5192474d8509ccfe92e8f1445f4fb08cbb4e 100644 (file)
@@ -1113,7 +1113,7 @@ int t4_eth_xmit(struct sge_eth_txq *txq, struct rte_mbuf *mbuf,
        u32 wr_mid;
        u64 cntrl, *end;
        bool v6;
-       u32 max_pkt_len = txq->data->dev_conf.rxmode.max_rx_pkt_len;
+       u32 max_pkt_len;
 
        /* Reject xmit if queue is stopped */
        if (unlikely(txq->flags & EQ_STOPPED))
@@ -1129,6 +1129,7 @@ out_free:
                return 0;
        }
 
+       max_pkt_len = txq->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        if ((!(m->ol_flags & PKT_TX_TCP_SEG)) &&
            (unlikely(m->pkt_len > max_pkt_len)))
                goto out_free;
index 840257c607ddd9f800d76530ace784dc5796bd52..c244c6f5a422e2105f6ac6c14cc6a654d80bfa33 100644 (file)
@@ -187,15 +187,13 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       if (frame_size > DPAA_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        fman_if_set_maxfrm(dev->process_private, frame_size);
 
        return 0;
@@ -213,6 +211,7 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
        struct fman_if *fif = dev->process_private;
        struct __fman_if *__fif;
        struct rte_intr_handle *intr_handle;
+       uint32_t max_rx_pktlen;
        int speed, duplex;
        int ret;
 
@@ -238,27 +237,17 @@ dpaa_eth_dev_configure(struct rte_eth_dev *dev)
                tx_offloads, dev_tx_offloads_nodis);
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               uint32_t max_len;
-
-               DPAA_PMD_DEBUG("enabling jumbo");
-
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-                   DPAA_MAX_RX_PKT_LEN)
-                       max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-               else {
-                       DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
-                               "supported is %d",
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                               DPAA_MAX_RX_PKT_LEN);
-                       max_len = DPAA_MAX_RX_PKT_LEN;
-               }
-
-               fman_if_set_maxfrm(dev->process_private, max_len);
-               dev->data->mtu = max_len
-                       - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE;
+       max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+                       RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+       if (max_rx_pktlen > DPAA_MAX_RX_PKT_LEN) {
+               DPAA_PMD_INFO("enabling jumbo override conf max len=%d "
+                       "supported is %d",
+                       max_rx_pktlen, DPAA_MAX_RX_PKT_LEN);
+               max_rx_pktlen = DPAA_MAX_RX_PKT_LEN;
        }
 
+       fman_if_set_maxfrm(dev->process_private, max_rx_pktlen);
+
        if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) {
                DPAA_PMD_DEBUG("enabling scatter mode");
                fman_if_set_sg(dev->process_private, 1);
@@ -936,6 +925,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
        u32 flags = 0;
        int ret;
        u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+       uint32_t max_rx_pktlen;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -977,17 +967,17 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                return -EINVAL;
        }
 
+       max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
+               VLAN_TAG_SIZE;
        /* Max packet can fit in single buffer */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
+       if (max_rx_pktlen <= buffsz) {
                ;
        } else if (dev->data->dev_conf.rxmode.offloads &
                        DEV_RX_OFFLOAD_SCATTER) {
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-                       buffsz * DPAA_SGT_MAX_ENTRIES) {
-                       DPAA_PMD_ERR("max RxPkt size %d too big to fit "
+               if (max_rx_pktlen > buffsz * DPAA_SGT_MAX_ENTRIES) {
+                       DPAA_PMD_ERR("Maximum Rx packet size %d too big to fit "
                                "MaxSGlist %d",
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                               buffsz * DPAA_SGT_MAX_ENTRIES);
+                               max_rx_pktlen, buffsz * DPAA_SGT_MAX_ENTRIES);
                        rte_errno = EOVERFLOW;
                        return -rte_errno;
                }
@@ -995,8 +985,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is"
                     " larger than a single mbuf (%u) and scattered"
                     " mode has not been requested",
-                    dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                    buffsz - RTE_PKTMBUF_HEADROOM);
+                    max_rx_pktlen, buffsz - RTE_PKTMBUF_HEADROOM);
        }
 
        dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
@@ -1034,8 +1023,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        dpaa_intf->valid = 1;
        DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
-               fman_if_get_sg_enable(fif),
-               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+               fman_if_get_sg_enable(fif), max_rx_pktlen);
        /* checking if push mode only, no error check for now */
        if (!rxq->is_static &&
            dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
index f2519f0fadf488209f15aff824178a472bf51785..b2a0c2dd40c59c814847a3037201ae45d10947ed 100644 (file)
@@ -540,6 +540,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        int tx_l3_csum_offload = false;
        int tx_l4_csum_offload = false;
        int ret, tc_index;
+       uint32_t max_rx_pktlen;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -559,25 +560,19 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                tx_offloads, dev_tx_offloads_nodis);
        }
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
-                       ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
-                               priv->token, eth_conf->rxmode.max_rx_pkt_len
-                               - RTE_ETHER_CRC_LEN);
-                       if (ret) {
-                               DPAA2_PMD_ERR(
-                                       "Unable to set mtu. check config");
-                               return ret;
-                       }
-                       dev->data->mtu =
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                               RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN -
-                               VLAN_TAG_SIZE;
-                               DPAA2_PMD_INFO("MTU configured for the device: %d",
-                                               dev->data->mtu);
-               } else {
-                       return -1;
+       max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN +
+                               RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE;
+       if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) {
+               ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
+                       priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN);
+               if (ret != 0) {
+                       DPAA2_PMD_ERR("Unable to set mtu. check config");
+                       return ret;
                }
+               DPAA2_PMD_INFO("MTU configured for the device: %d",
+                               dev->data->mtu);
+       } else {
+               return -1;
        }
 
        if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) {
@@ -1470,15 +1465,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN)
                return -EINVAL;
 
-       if (frame_size > DPAA2_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        /* Set the Max Rx frame length as 'mtu' +
         * Maximum Ethernet header length
         */
index 5a3af0da90280a915f3de86c6b7af631d3ef06f5..c9692bd7b7bc026bf489537e32b349d3841f0525 100644 (file)
@@ -1816,7 +1816,7 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        rctl = E1000_READ_REG(hw, E1000_RCTL);
 
        /* switch to jumbo mode if needed */
-       if (frame_size > E1000_ETH_MAX_LEN) {
+       if (mtu > RTE_ETHER_MTU) {
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= E1000_RCTL_LPE;
@@ -1827,8 +1827,6 @@ eth_em_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        return 0;
 }
 
index 194da6b5b3f01de3bed14d8230e30e0b7039347c..9b75b5d08b3a3f2c6e74d4248e514ccd962c451e 100644 (file)
@@ -2677,9 +2677,7 @@ igb_vlan_hw_extend_disable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               E1000_WRITE_REG(hw, E1000_RLPML,
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       E1000_WRITE_REG(hw, E1000_RLPML, dev->data->mtu + E1000_ETH_OVERHEAD);
 }
 
 static void
@@ -2695,10 +2693,8 @@ igb_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
 
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                               VLAN_TAG_SIZE);
+       E1000_WRITE_REG(hw, E1000_RLPML,
+               dev->data->mtu + E1000_ETH_OVERHEAD + VLAN_TAG_SIZE);
 }
 
 static int
@@ -4396,7 +4392,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        rctl = E1000_READ_REG(hw, E1000_RCTL);
 
        /* switch to jumbo mode if needed */
-       if (frame_size > E1000_ETH_MAX_LEN) {
+       if (mtu > RTE_ETHER_MTU) {
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= E1000_RCTL_LPE;
@@ -4407,11 +4403,7 @@ eth_igb_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
-       E1000_WRITE_REG(hw, E1000_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       E1000_WRITE_REG(hw, E1000_RLPML, frame_size);
 
        return 0;
 }
index e04c2b41ab42a4903e88467a75dfaece423b7e8c..2fc27bbbc68221b49c33c650819094817bf51af2 100644 (file)
@@ -2312,6 +2312,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        uint32_t srrctl;
        uint16_t buf_size;
        uint16_t rctl_bsize;
+       uint32_t max_len;
        uint16_t i;
        int ret;
 
@@ -2330,9 +2331,8 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure support of jumbo frames, if any.
         */
+       max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-
                rctl |= E1000_RCTL_LPE;
 
                /*
@@ -2410,8 +2410,7 @@ eth_igb_rx_init(struct rte_eth_dev *dev)
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);
 
                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                               2 * VLAN_TAG_SIZE) > buf_size){
+                       if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
@@ -2635,15 +2634,15 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
        uint32_t srrctl;
        uint16_t buf_size;
        uint16_t rctl_bsize;
+       uint32_t max_len;
        uint16_t i;
        int ret;
 
        hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
        /* setup MTU */
-       e1000_rlpml_set_vf(hw,
-               (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
-               VLAN_TAG_SIZE));
+       max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
+       e1000_rlpml_set_vf(hw, (uint16_t)(max_len + VLAN_TAG_SIZE));
 
        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
@@ -2700,8 +2699,7 @@ eth_igbvf_rx_init(struct rte_eth_dev *dev)
                                               E1000_SRRCTL_BSIZEPKT_SHIFT);
 
                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                               2 * VLAN_TAG_SIZE) > buf_size){
+                       if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
                                if (!dev->data->scattered_rx)
                                        PMD_INIT_LOG(DEBUG,
                                                     "forcing scatter mode");
index a82d4b62873672262db075fe0a4d0f285c03088e..e2f7213acb84b9ada324b5967e2d7f9ce20d6490 100644 (file)
@@ -677,26 +677,14 @@ err:
        return rc;
 }
 
-static uint32_t ena_get_mtu_conf(struct ena_adapter *adapter)
-{
-       uint32_t max_frame_len = adapter->max_mtu;
-
-       if (adapter->edev_data->dev_conf.rxmode.offloads &
-           DEV_RX_OFFLOAD_JUMBO_FRAME)
-               max_frame_len =
-                       adapter->edev_data->dev_conf.rxmode.max_rx_pkt_len;
-
-       return max_frame_len;
-}
-
 static int ena_check_valid_conf(struct ena_adapter *adapter)
 {
-       uint32_t max_frame_len = ena_get_mtu_conf(adapter);
+       uint32_t mtu = adapter->edev_data->mtu;
 
-       if (max_frame_len > adapter->max_mtu || max_frame_len < ENA_MIN_MTU) {
+       if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
                PMD_INIT_LOG(ERR,
                        "Unsupported MTU of %d. Max MTU: %d, min MTU: %d\n",
-                       max_frame_len, adapter->max_mtu, ENA_MIN_MTU);
+                       mtu, adapter->max_mtu, ENA_MIN_MTU);
                return ENA_COM_UNSUPPORTED;
        }
 
@@ -869,10 +857,10 @@ static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        ena_dev = &adapter->ena_dev;
        ena_assert_msg(ena_dev != NULL, "Uninitialized device\n");
 
-       if (mtu > ena_get_mtu_conf(adapter) || mtu < ENA_MIN_MTU) {
+       if (mtu > adapter->max_mtu || mtu < ENA_MIN_MTU) {
                PMD_DRV_LOG(ERR,
                        "Invalid MTU setting. New MTU: %d, max MTU: %d, min MTU: %d\n",
-                       mtu, ena_get_mtu_conf(adapter), ENA_MIN_MTU);
+                       mtu, adapter->max_mtu, ENA_MIN_MTU);
                return -EINVAL;
        }
 
@@ -1943,7 +1931,10 @@ static int ena_infos_get(struct rte_eth_dev *dev,
        dev_info->hash_key_size = ENA_HASH_KEY_SIZE;
 
        dev_info->min_rx_bufsize = ENA_MIN_FRAME_LEN;
-       dev_info->max_rx_pktlen  = adapter->max_mtu;
+       dev_info->max_rx_pktlen  = adapter->max_mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
+       dev_info->min_mtu = ENA_MIN_MTU;
+       dev_info->max_mtu = adapter->max_mtu;
        dev_info->max_mac_addrs = 1;
 
        dev_info->max_rx_queues = adapter->max_num_io_queues;
index 784ed391b749e3b65ed6d1224513552336dc6524..16c83914e8ce053e5a399962e3d3919f285cd72e 100644 (file)
@@ -681,7 +681,7 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       if (frame_size > ENETC_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads &=
                                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
@@ -691,8 +691,6 @@ enetc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
        enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        /*setting the MTU*/
        enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(frame_size) |
                      ENETC_SET_TX_MTU(ENETC_MAC_MAXFRM_SIZE));
@@ -709,23 +707,15 @@ enetc_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
        uint64_t rx_offloads = eth_conf->rxmode.offloads;
        uint32_t checksum = L3_CKSUM | L4_CKSUM;
+       uint32_t max_len;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               uint32_t max_len;
-
-               max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
-
-               enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM,
-                             ENETC_SET_MAXFRM(max_len));
-               enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0),
-                             ENETC_MAC_MAXFRM_SIZE);
-               enetc_port_wr(enetc_hw, ENETC_PTXMBAR,
-                             2 * ENETC_MAC_MAXFRM_SIZE);
-               dev->data->mtu = RTE_ETHER_MAX_LEN - RTE_ETHER_HDR_LEN -
-                       RTE_ETHER_CRC_LEN;
-       }
+       max_len = dev->data->dev_conf.rxmode.mtu + RTE_ETHER_HDR_LEN +
+               RTE_ETHER_CRC_LEN;
+       enetc_port_wr(enetc_hw, ENETC_PM0_MAXFRM, ENETC_SET_MAXFRM(max_len));
+       enetc_port_wr(enetc_hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+       enetc_port_wr(enetc_hw, ENETC_PTXMBAR, 2 * ENETC_MAC_MAXFRM_SIZE);
 
        if (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
                int config;
index a1a53248f63b27eeb378868af1a3081c85b22962..8df7332bc5e006bca9610462af19b26f08461de7 100644 (file)
@@ -459,7 +459,7 @@ static int enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
         * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
         * a hint to the driver to size receive buffers accordingly so that
         * larger-than-vnic-mtu packets get truncated.. For DPDK, we let
-        * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+        * the user decide the buffer size via rxmode.mtu, basically
         * ignoring vNIC mtu.
         */
        device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
index 2affd380c6a4807a84075191ca64106e172dc9c0..dfc7f5d1f94f6326f58540fef94d97777b7cc8c3 100644 (file)
@@ -282,7 +282,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
        struct rq_enet_desc *rqd = rq->ring.descs;
        unsigned i;
        dma_addr_t dma_addr;
-       uint32_t max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
        uint16_t rq_buf_len;
 
        if (!rq->in_use)
@@ -293,16 +293,16 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 
        /*
         * If *not* using scatter and the mbuf size is greater than the
-        * requested max packet size (max_rx_pkt_len), then reduce the
-        * posted buffer size to max_rx_pkt_len. HW still receives packets
-        * larger than max_rx_pkt_len, but they will be truncated, which we
+        * requested max packet size (mtu + eth overhead), then reduce the
+        * posted buffer size to max packet size. HW still receives packets
+        * larger than max packet size, but they will be truncated, which we
         * drop in the rx handler. Not ideal, but better than returning
         * large packets when the user is not expecting them.
         */
-       max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
        rq_buf_len = rte_pktmbuf_data_room_size(rq->mp) - RTE_PKTMBUF_HEADROOM;
-       if (max_rx_pkt_len < rq_buf_len && !rq->data_queue_enable)
-               rq_buf_len = max_rx_pkt_len;
+       if (max_rx_pktlen < rq_buf_len && !rq->data_queue_enable)
+               rq_buf_len = max_rx_pktlen;
        for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
                mb = rte_mbuf_raw_alloc(rq->mp);
                if (mb == NULL) {
@@ -818,7 +818,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        unsigned int mbuf_size, mbufs_per_pkt;
        unsigned int nb_sop_desc, nb_data_desc;
        uint16_t min_sop, max_sop, min_data, max_data;
-       uint32_t max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
 
        /*
         * Representor uses a reserved PF queue. Translate representor
@@ -854,23 +854,23 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 
        mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
                               RTE_PKTMBUF_HEADROOM);
-       /* max_rx_pkt_len includes the ethernet header and CRC. */
-       max_rx_pkt_len = enic->rte_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       /* max_rx_pktlen includes the ethernet header and CRC. */
+       max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->rte_dev->data->mtu);
 
        if (enic->rte_dev->data->dev_conf.rxmode.offloads &
            DEV_RX_OFFLOAD_SCATTER) {
                dev_info(enic, "Rq %u Scatter rx mode enabled\n", queue_idx);
                /* ceil((max pkt len)/mbuf_size) */
-               mbufs_per_pkt = (max_rx_pkt_len + mbuf_size - 1) / mbuf_size;
+               mbufs_per_pkt = (max_rx_pktlen + mbuf_size - 1) / mbuf_size;
        } else {
                dev_info(enic, "Scatter rx mode disabled\n");
                mbufs_per_pkt = 1;
-               if (max_rx_pkt_len > mbuf_size) {
+               if (max_rx_pktlen > mbuf_size) {
                        dev_warning(enic, "The maximum Rx packet size (%u) is"
                                    " larger than the mbuf size (%u), and"
                                    " scatter is disabled. Larger packets will"
                                    " be truncated.\n",
-                                   max_rx_pkt_len, mbuf_size);
+                                   max_rx_pktlen, mbuf_size);
                }
        }
 
@@ -879,16 +879,15 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
                rq_sop->data_queue_enable = 1;
                rq_data->in_use = 1;
                /*
-                * HW does not directly support rxmode.max_rx_pkt_len. HW always
+                * HW does not directly support MTU. HW always
                 * receives packet sizes up to the "max" MTU.
                 * If not using scatter, we can achieve the effect of dropping
                 * larger packets by reducing the size of posted buffers.
                 * See enic_alloc_rx_queue_mbufs().
                 */
-               if (max_rx_pkt_len <
-                   enic_mtu_to_max_rx_pktlen(enic->max_mtu)) {
-                       dev_warning(enic, "rxmode.max_rx_pkt_len is ignored"
-                                   " when scatter rx mode is in use.\n");
+               if (enic->rte_dev->data->mtu < enic->max_mtu) {
+                       dev_warning(enic,
+                               "mtu is ignored when scatter rx mode is in use.\n");
                }
        } else {
                dev_info(enic, "Rq %u Scatter rx mode not being used\n",
@@ -931,7 +930,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
        if (mbufs_per_pkt > 1) {
                dev_info(enic, "For max packet size %u and mbuf size %u valid"
                         " rx descriptor range is %u to %u\n",
-                        max_rx_pkt_len, mbuf_size, min_sop + min_data,
+                        max_rx_pktlen, mbuf_size, min_sop + min_data,
                         max_sop + max_data);
        }
        dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
@@ -1634,11 +1633,6 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
                        "MTU (%u) is greater than value configured in NIC (%u)\n",
                        new_mtu, config_mtu);
 
-       /* Update the MTU and maximum packet length */
-       eth_dev->data->mtu = new_mtu;
-       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len =
-               enic_mtu_to_max_rx_pktlen(new_mtu);
-
        /*
         * If the device has not started (enic_enable), nothing to do.
         * Later, enic_enable() will set up RQs reflecting the new maximum
index c436263c7c9c3684cf3c61abd2511b7a4e9033aa..400e77ec62006d1e3362072e2dbb0cc4438c2d1e 100644 (file)
@@ -757,7 +757,7 @@ fm10k_dev_rx_init(struct rte_eth_dev *dev)
                                FM10K_SRRCTL_LOOPBACK_SUPPRESS);
 
                /* It adds dual VLAN length for supporting dual VLAN */
-               if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
+               if ((dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN +
                                2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
                        rxq->offloads & DEV_RX_OFFLOAD_SCATTER) {
                        uint32_t reg;
index cd4dad8588f3b348a61ef792371846429f25ce79..aef8adc2e1e0eeb1f8a97dc2e5bbe9ec4ac321ab 100644 (file)
@@ -315,19 +315,19 @@ static int hinic_dev_configure(struct rte_eth_dev *dev)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_RSS_HASH;
 
        /* mtu size is 256~9600 */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len < HINIC_MIN_FRAME_SIZE ||
-           dev->data->dev_conf.rxmode.max_rx_pkt_len >
-           HINIC_MAX_JUMBO_FRAME_SIZE) {
+       if (HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) <
+                       HINIC_MIN_FRAME_SIZE ||
+           HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu) >
+                       HINIC_MAX_JUMBO_FRAME_SIZE) {
                PMD_DRV_LOG(ERR,
-                       "Max rx pkt len out of range, get max_rx_pkt_len:%d, "
+                       "Packet length out of range, get packet length:%d, "
                        "expect between %d and %d",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                       HINIC_MTU_TO_PKTLEN(dev->data->dev_conf.rxmode.mtu),
                        HINIC_MIN_FRAME_SIZE, HINIC_MAX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
 
-       nic_dev->mtu_size =
-               HINIC_PKTLEN_TO_MTU(dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       nic_dev->mtu_size = dev->data->dev_conf.rxmode.mtu;
 
        /* rss template */
        err = hinic_config_mq_mode(dev, TRUE);
@@ -1534,7 +1534,6 @@ static void hinic_deinit_mac_addr(struct rte_eth_dev *eth_dev)
 static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct hinic_nic_dev *nic_dev = HINIC_ETH_DEV_TO_PRIVATE_NIC_DEV(dev);
-       uint32_t frame_size;
        int ret = 0;
 
        PMD_DRV_LOG(INFO, "Set port mtu, port_id: %d, mtu: %d, max_pkt_len: %d",
@@ -1552,16 +1551,13 @@ static int hinic_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                return ret;
        }
 
-       /* update max frame size */
-       frame_size = HINIC_MTU_TO_PKTLEN(mtu);
-       if (frame_size > HINIC_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        nic_dev->mtu_size = mtu;
 
        return ret;
index b98a46f73e8c10e2273877d4c8016ae7981d0fda..e1fcba9e9482aa3dec8222ebea0055fb5f0978cb 100644 (file)
@@ -2366,41 +2366,6 @@ hns3_init_ring_with_vector(struct hns3_hw *hw)
        return 0;
 }
 
-static int
-hns3_refresh_mtu(struct rte_eth_dev *dev, struct rte_eth_conf *conf)
-{
-       struct hns3_adapter *hns = dev->data->dev_private;
-       struct hns3_hw *hw = &hns->hw;
-       uint32_t max_rx_pkt_len;
-       uint16_t mtu;
-       int ret;
-
-       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME))
-               return 0;
-
-       /*
-        * If jumbo frames are enabled, MTU needs to be refreshed
-        * according to the maximum RX packet length.
-        */
-       max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
-       if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
-           max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
-               hns3_err(hw, "maximum Rx packet length must be greater than %u "
-                        "and no more than %u when jumbo frame enabled.",
-                        (uint16_t)HNS3_DEFAULT_FRAME_LEN,
-                        (uint16_t)HNS3_MAX_FRAME_LEN);
-               return -EINVAL;
-       }
-
-       mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
-       ret = hns3_dev_mtu_set(dev, mtu);
-       if (ret)
-               return ret;
-       dev->data->mtu = mtu;
-
-       return 0;
-}
-
 static int
 hns3_setup_dcb(struct rte_eth_dev *dev)
 {
@@ -2515,8 +2480,8 @@ hns3_dev_configure(struct rte_eth_dev *dev)
                        goto cfg_err;
        }
 
-       ret = hns3_refresh_mtu(dev, conf);
-       if (ret)
+       ret = hns3_dev_mtu_set(dev, conf->rxmode.mtu);
+       if (ret != 0)
                goto cfg_err;
 
        ret = hns3_mbuf_dyn_rx_timestamp_register(dev, conf);
@@ -2611,7 +2576,7 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
 
        rte_spinlock_lock(&hw->lock);
-       is_jumbo_frame = frame_size > HNS3_DEFAULT_FRAME_LEN ? true : false;
+       is_jumbo_frame = mtu > RTE_ETHER_MTU ? true : false;
        frame_size = RTE_MAX(frame_size, HNS3_DEFAULT_FRAME_LEN);
 
        /*
@@ -2632,7 +2597,6 @@ hns3_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        rte_spinlock_unlock(&hw->lock);
 
        return 0;
index e896de58a422e8f9cc9aed2bede07fc994186f33..b10fa2d5ad8ad2f1bf5e9962dd1c21bda77e2146 100644 (file)
@@ -784,8 +784,6 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
        uint16_t nb_tx_q = dev->data->nb_tx_queues;
        struct rte_eth_rss_conf rss_conf;
-       uint32_t max_rx_pkt_len;
-       uint16_t mtu;
        bool gro_en;
        int ret;
 
@@ -825,28 +823,9 @@ hns3vf_dev_configure(struct rte_eth_dev *dev)
                        goto cfg_err;
        }
 
-       /*
-        * If jumbo frames are enabled, MTU needs to be refreshed
-        * according to the maximum RX packet length.
-        */
-       if (conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               max_rx_pkt_len = conf->rxmode.max_rx_pkt_len;
-               if (max_rx_pkt_len > HNS3_MAX_FRAME_LEN ||
-                   max_rx_pkt_len <= HNS3_DEFAULT_FRAME_LEN) {
-                       hns3_err(hw, "maximum Rx packet length must be greater "
-                                "than %u and less than %u when jumbo frame enabled.",
-                                (uint16_t)HNS3_DEFAULT_FRAME_LEN,
-                                (uint16_t)HNS3_MAX_FRAME_LEN);
-                       ret = -EINVAL;
-                       goto cfg_err;
-               }
-
-               mtu = (uint16_t)HNS3_PKTLEN_TO_MTU(max_rx_pkt_len);
-               ret = hns3vf_dev_mtu_set(dev, mtu);
-               if (ret)
-                       goto cfg_err;
-               dev->data->mtu = mtu;
-       }
+       ret = hns3vf_dev_mtu_set(dev, conf->rxmode.mtu);
+       if (ret != 0)
+               goto cfg_err;
 
        ret = hns3vf_dev_configure_vlan(dev);
        if (ret)
@@ -935,7 +914,6 @@ hns3vf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        else
                dev->data->dev_conf.rxmode.offloads &=
                                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        rte_spinlock_unlock(&hw->lock);
 
        return 0;
index 02040b84f3c48e011a55b9d9c0345583aacc46ad..602548a4f25bebeb4cdec16e2c3b9e355952fe0e 100644 (file)
@@ -1747,18 +1747,18 @@ hns3_rxq_conf_runtime_check(struct hns3_hw *hw, uint16_t buf_size,
                                uint16_t nb_desc)
 {
        struct rte_eth_dev *dev = &rte_eth_devices[hw->data->port_id];
-       struct rte_eth_rxmode *rxmode = &hw->data->dev_conf.rxmode;
        eth_rx_burst_t pkt_burst = dev->rx_pkt_burst;
+       uint32_t frame_size = dev->data->mtu + HNS3_ETH_OVERHEAD;
        uint16_t min_vec_bds;
 
        /*
         * HNS3 hardware network engine set scattered as default. If the driver
         * is not work in scattered mode and the pkts greater than buf_size
-        * but smaller than max_rx_pkt_len will be distributed to multiple BDs.
+        * but smaller than frame size will be distributed to multiple BDs.
         * Driver cannot handle this situation.
         */
-       if (!hw->data->scattered_rx && rxmode->max_rx_pkt_len > buf_size) {
-               hns3_err(hw, "max_rx_pkt_len is not allowed to be set greater "
+       if (!hw->data->scattered_rx && frame_size > buf_size) {
+               hns3_err(hw, "frame size is not allowed to be set greater "
                             "than rx_buf_len if scattered is off.");
                return -EINVAL;
        }
@@ -1970,7 +1970,7 @@ hns3_rx_scattered_calc(struct rte_eth_dev *dev)
        }
 
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_SCATTER ||
-           dev_conf->rxmode.max_rx_pkt_len > hw->rx_buf_len)
+           dev->data->mtu + HNS3_ETH_OVERHEAD > hw->rx_buf_len)
                dev->data->scattered_rx = true;
 }
 
index f856bbed0476c2aca7055bf52d06133f2f30603d..57abc2cf747d4ccebb5f786988019732d0869150 100644 (file)
@@ -11437,14 +11437,10 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > I40E_ETH_MAX_LEN)
-               dev_data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
+       if (mtu > RTE_ETHER_MTU)
+               dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev_data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+               dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        return ret;
 }
index 3df4e3de187c4578c6e2f60fd24bb97bb3d8b707..9b030198e537e7aade16ed47bc44270bae279f7d 100644 (file)
@@ -2899,8 +2899,8 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
        }
 
        rxq->max_pkt_len =
-               RTE_MIN((uint32_t)(hw->func_caps.rx_buf_chain_len *
-                       rxq->rx_buf_len), data->dev_conf.rxmode.max_rx_pkt_len);
+               RTE_MIN(hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len,
+                               data->mtu + I40E_ETH_OVERHEAD);
        if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= I40E_ETH_MAX_LEN ||
                        rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
index 18428049d805fc7150c6fb1374eee133374b6754..5fc663f6bd46c57daacd1e8fc6d7841a8533e4c3 100644 (file)
@@ -576,13 +576,14 @@ iavf_init_rxq(struct rte_eth_dev *dev, struct iavf_rx_queue *rxq)
        struct iavf_hw *hw = IAVF_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct rte_eth_dev_data *dev_data = dev->data;
        uint16_t buf_size, max_pkt_len;
+       uint32_t frame_size = dev->data->mtu + IAVF_ETH_OVERHEAD;
 
        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
 
        /* Calculate the maximum packet length allowed */
        max_pkt_len = RTE_MIN((uint32_t)
                        rxq->rx_buf_len * IAVF_MAX_CHAINED_RX_BUFFERS,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                       frame_size);
 
        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
@@ -839,7 +840,7 @@ iavf_dev_start(struct rte_eth_dev *dev)
 
        adapter->stopped = 0;
 
-       vf->max_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       vf->max_pkt_len = dev->data->mtu + IAVF_ETH_OVERHEAD;
        vf->num_queue_pairs = RTE_MAX(dev->data->nb_rx_queues,
                                      dev->data->nb_tx_queues);
        num_queue_pairs = vf->num_queue_pairs;
@@ -1472,15 +1473,13 @@ iavf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > IAVF_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |=
                                DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &=
                                ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        return ret;
 }
 
index 14f4fe80fef29659cbc5b5f948f185a8d4a3dce9..00d9e873e64f315aadcef4d496c7c1851c052444 100644 (file)
@@ -66,9 +66,8 @@ ice_dcf_init_rxq(struct rte_eth_dev *dev, struct ice_rx_queue *rxq)
        buf_size = rte_pktmbuf_data_room_size(rxq->mp) - RTE_PKTMBUF_HEADROOM;
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
-       max_pkt_len = RTE_MIN((uint32_t)
-                             ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
-                             dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       max_pkt_len = RTE_MIN(ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+                             dev->data->mtu + ICE_ETH_OVERHEAD);
 
        /* Check if the jumbo frame and maximum packet length are set
         * correctly.
index 170a12759d52da48b47c63215a5e29a617b6ad6d..878b3b1410c9eca00b2132d93ac256e68183f75e 100644 (file)
@@ -3603,8 +3603,8 @@ ice_dev_start(struct rte_eth_dev *dev)
        pf->adapter_stopped = false;
 
        /* Set the max frame size to default value*/
-       max_frame_size = pf->dev_data->dev_conf.rxmode.max_rx_pkt_len ?
-               pf->dev_data->dev_conf.rxmode.max_rx_pkt_len :
+       max_frame_size = pf->dev_data->mtu ?
+               pf->dev_data->mtu + ICE_ETH_OVERHEAD :
                ICE_FRAME_SIZE_MAX;
 
        /* Set the max frame size to HW*/
@@ -3992,14 +3992,10 @@ ice_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > ICE_ETH_MAX_LEN)
-               dev_data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
+       if (mtu > RTE_ETHER_MTU)
+               dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev_data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
-       dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+               dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        return 0;
 }
index a20f4c751a1b2d53b64a2bed01ad293e0475b1aa..220537741d6c4ec9bdeaed1af2f5a0dad4a53bce 100644 (file)
@@ -271,15 +271,16 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
        uint32_t rxdid = ICE_RXDID_COMMS_OVS;
        uint32_t regval;
        struct ice_adapter *ad = rxq->vsi->adapter;
+       uint32_t frame_size = dev_data->mtu + ICE_ETH_OVERHEAD;
 
        /* Set buffer size as the head split is disabled. */
        buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
                              RTE_PKTMBUF_HEADROOM);
        rxq->rx_hdr_len = 0;
        rxq->rx_buf_len = RTE_ALIGN(buf_size, (1 << ICE_RLAN_CTX_DBUF_S));
-       rxq->max_pkt_len = RTE_MIN((uint32_t)
-                                  ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
-                                  dev_data->dev_conf.rxmode.max_rx_pkt_len);
+       rxq->max_pkt_len =
+               RTE_MIN((uint32_t)ICE_SUPPORT_CHAIN_NUM * rxq->rx_buf_len,
+                       frame_size);
 
        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
@@ -385,11 +386,8 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
                return -EINVAL;
        }
 
-       buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
-                             RTE_PKTMBUF_HEADROOM);
-
        /* Check if scattered RX needs to be used. */
-       if (rxq->max_pkt_len > buf_size)
+       if (frame_size > buf_size)
                dev_data->scattered_rx = 1;
 
        rxq->qrx_tail = hw->hw_addr + QRX_TAIL(rxq->reg_idx);
index 03a77b37718244338c2dc28aadbaf677b4af5a8b..2b1f2f5a39d999a00bd0c988d8d36580620a2fd2 100644 (file)
 
 #define IGC_INTEL_VENDOR_ID            0x8086
 
-/*
- * The overhead from MTU to max frame size.
- * Considering VLAN so tag needs to be counted.
- */
-#define IGC_ETH_OVERHEAD               (RTE_ETHER_HDR_LEN + \
-                                       RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE)
-
 #define IGC_FC_PAUSE_TIME              0x0680
 #define IGC_LINK_UPDATE_CHECK_TIMEOUT  90  /* 9s */
 #define IGC_LINK_UPDATE_CHECK_INTERVAL 100 /* ms */
@@ -1601,21 +1594,15 @@ eth_igc_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 
        /* switch to jumbo mode if needed */
        if (mtu > RTE_ETHER_MTU) {
-               dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl |= IGC_RCTL_LPE;
        } else {
-               dev->data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                rctl &= ~IGC_RCTL_LPE;
        }
        IGC_WRITE_REG(hw, IGC_RCTL, rctl);
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
-       IGC_WRITE_REG(hw, IGC_RLPML,
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
 
        return 0;
 }
@@ -2485,6 +2472,7 @@ static int
 igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
 {
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
        uint32_t ctrl_ext;
 
        ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
@@ -2493,23 +2481,14 @@ igc_vlan_hw_extend_disable(struct rte_eth_dev *dev)
        if ((ctrl_ext & IGC_CTRL_EXT_EXT_VLAN) == 0)
                return 0;
 
-       if ((dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
-               goto write_ext_vlan;
-
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <
-               RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
+       if (frame_size < RTE_ETHER_MIN_MTU + VLAN_TAG_SIZE) {
                PMD_DRV_LOG(ERR, "Maximum packet length %u error, min is %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                       VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
+                       frame_size, VLAN_TAG_SIZE + RTE_ETHER_MIN_MTU);
                return -EINVAL;
        }
-       dev->data->dev_conf.rxmode.max_rx_pkt_len -= VLAN_TAG_SIZE;
-       IGC_WRITE_REG(hw, IGC_RLPML,
-               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size - VLAN_TAG_SIZE);
 
-write_ext_vlan:
        IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext & ~IGC_CTRL_EXT_EXT_VLAN);
        return 0;
 }
@@ -2518,6 +2497,7 @@ static int
 igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 {
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
+       uint32_t frame_size = dev->data->mtu + IGC_ETH_OVERHEAD;
        uint32_t ctrl_ext;
 
        ctrl_ext = IGC_READ_REG(hw, IGC_CTRL_EXT);
@@ -2526,23 +2506,14 @@ igc_vlan_hw_extend_enable(struct rte_eth_dev *dev)
        if (ctrl_ext & IGC_CTRL_EXT_EXT_VLAN)
                return 0;
 
-       if ((dev->data->dev_conf.rxmode.offloads &
-                       DEV_RX_OFFLOAD_JUMBO_FRAME) == 0)
-               goto write_ext_vlan;
-
        /* Update maximum packet length */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-               MAX_RX_JUMBO_FRAME_SIZE - VLAN_TAG_SIZE) {
+       if (frame_size > MAX_RX_JUMBO_FRAME_SIZE) {
                PMD_DRV_LOG(ERR, "Maximum packet length %u error, max is %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                       VLAN_TAG_SIZE, MAX_RX_JUMBO_FRAME_SIZE);
+                       frame_size, MAX_RX_JUMBO_FRAME_SIZE);
                return -EINVAL;
        }
-       dev->data->dev_conf.rxmode.max_rx_pkt_len += VLAN_TAG_SIZE;
-       IGC_WRITE_REG(hw, IGC_RLPML,
-               dev->data->dev_conf.rxmode.max_rx_pkt_len);
+       IGC_WRITE_REG(hw, IGC_RLPML, frame_size);
 
-write_ext_vlan:
        IGC_WRITE_REG(hw, IGC_CTRL_EXT, ctrl_ext | IGC_CTRL_EXT_EXT_VLAN);
        return 0;
 }
index 7b6c209df3b6fdd9abb57a84605ba630cca3b78e..b3473b5b16464e4610f701c24b1621655b546139 100644 (file)
@@ -35,6 +35,13 @@ extern "C" {
 #define IGC_HKEY_REG_SIZE              IGC_DEFAULT_REG_SIZE
 #define IGC_HKEY_SIZE                  (IGC_HKEY_REG_SIZE * IGC_HKEY_MAX_INDEX)
 
+/*
+ * The overhead from MTU to max frame size.
+ * Considering VLAN so tag needs to be counted.
+ */
+#define IGC_ETH_OVERHEAD               (RTE_ETHER_HDR_LEN + \
+                                       RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE * 2)
+
 /*
  * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
  * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
index 383bf834f3b64264a3c247cbd461edcf919ea9c4..9b7a9d953bff68404afaee7e7378dd0f3b4e1ba1 100644 (file)
@@ -1062,7 +1062,7 @@ igc_rx_init(struct rte_eth_dev *dev)
        struct igc_rx_queue *rxq;
        struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
        uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
-       uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint32_t max_rx_pktlen;
        uint32_t rctl;
        uint32_t rxcsum;
        uint16_t buf_size;
@@ -1080,17 +1080,17 @@ igc_rx_init(struct rte_eth_dev *dev)
        IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
 
        /* Configure support of jumbo frames, if any. */
-       if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+       if ((offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
                rctl |= IGC_RCTL_LPE;
-
-               /*
-                * Set maximum packet length by default, and might be updated
-                * together with enabling/disabling dual VLAN.
-                */
-               IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
-       } else {
+       else
                rctl &= ~IGC_RCTL_LPE;
-       }
+
+       max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
+       /*
+        * Set maximum packet length by default, and might be updated
+        * together with enabling/disabling dual VLAN.
+        */
+       IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
 
        /* Configure and enable each RX queue. */
        rctl_bsize = 0;
@@ -1149,7 +1149,7 @@ igc_rx_init(struct rte_eth_dev *dev)
                                        IGC_SRRCTL_BSIZEPKT_SHIFT);
 
                        /* It adds dual VLAN length for supporting dual VLAN */
-                       if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
+                       if (max_rx_pktlen > buf_size)
                                dev->data->scattered_rx = 1;
                } else {
                        /*
index 344c076f309a58502fbe143ee226767795c4c7ad..d5d610c80bcd1a8da1f25e2e7fc349564a06acf0 100644 (file)
@@ -343,25 +343,15 @@ static int
 ionic_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
        struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
-       uint32_t max_frame_size;
        int err;
 
        IONIC_PRINT_CALL();
 
        /*
         * Note: mtu check against IONIC_MIN_MTU, IONIC_MAX_MTU
-        * is done by the the API.
+        * is done by the API.
         */
 
-       /*
-        * Max frame size is MTU + Ethernet header + VLAN + QinQ
-        * (plus ETHER_CRC_LEN if the adapter is able to keep CRC)
-        */
-       max_frame_size = mtu + RTE_ETHER_HDR_LEN + 4 + 4;
-
-       if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len < max_frame_size)
-               return -EINVAL;
-
        err = ionic_lif_change_mtu(lif, mtu);
        if (err)
                return err;
index 67631a5813b79a55e8229cc111a925cb58799eef..4d16a39c6b6dceabc1189f24cc2bec192fac37f3 100644 (file)
@@ -771,7 +771,7 @@ ionic_rx_clean(struct ionic_rx_qcq *rxq,
        struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
        struct rte_mbuf *rxm, *rxm_seg;
        uint32_t max_frame_size =
-               rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+               rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
        uint64_t pkt_flags = 0;
        uint32_t pkt_type;
        struct ionic_rx_stats *stats = &rxq->stats;
@@ -1014,7 +1014,7 @@ ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
 int __rte_cold
 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
 {
-       uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
        uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
        struct ionic_rx_qcq *rxq;
        int err;
@@ -1128,7 +1128,7 @@ ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 {
        struct ionic_rx_qcq *rxq = rx_queue;
        uint32_t frame_size =
-               rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+               rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
        struct ionic_rx_service service_cb_arg;
 
        service_cb_arg.rx_pkts = rx_pkts;
index 694435a4ae246b032c0c8ce7b1d4f98abd415738..0d1aaa6449b96d23db7010036b036be63277dc68 100644 (file)
@@ -2791,14 +2791,10 @@ ipn3ke_rpst_mtu_set(struct rte_eth_dev *ethdev, uint16_t mtu)
                return -EBUSY;
        }
 
-       if (frame_size > IPN3KE_ETH_MAX_LEN)
-               dev_data->dev_conf.rxmode.offloads |=
-                       (uint64_t)(DEV_RX_OFFLOAD_JUMBO_FRAME);
+       if (mtu > RTE_ETHER_MTU)
+               dev_data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev_data->dev_conf.rxmode.offloads &=
-                       (uint64_t)(~DEV_RX_OFFLOAD_JUMBO_FRAME);
-
-       dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+               dev_data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        if (rpst->i40e_pf_eth) {
                ret = rpst->i40e_pf_eth->dev_ops->mtu_set(rpst->i40e_pf_eth,
index 4dbe049fe9860ea656a9bb14576846d66c0ccc1a..29456ab59502e6d9ef32e61187aa62a355b48903 100644 (file)
@@ -5165,7 +5165,6 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        struct ixgbe_hw *hw;
        struct rte_eth_dev_info dev_info;
        uint32_t frame_size = mtu + IXGBE_ETH_OVERHEAD;
-       struct rte_eth_dev_data *dev_data = dev->data;
        int ret;
 
        ret = ixgbe_dev_info_get(dev, &dev_info);
@@ -5179,9 +5178,9 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        /* If device is started, refuse mtu that requires the support of
         * scattered packets when this feature has not been enabled before.
         */
-       if (dev_data->dev_started && !dev_data->scattered_rx &&
-           (frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
-            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+       if (dev->data->dev_started && !dev->data->scattered_rx &&
+           frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
+                       dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
                PMD_INIT_LOG(ERR, "Stop port first.");
                return -EINVAL;
        }
@@ -5190,23 +5189,18 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
 
        /* switch to jumbo mode if needed */
-       if (frame_size > IXGBE_ETH_MAX_LEN) {
-               dev->data->dev_conf.rxmode.offloads |=
-                       DEV_RX_OFFLOAD_JUMBO_FRAME;
+       if (mtu > RTE_ETHER_MTU) {
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
        } else {
-               dev->data->dev_conf.rxmode.offloads &=
-                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
        maxfrs &= 0x0000FFFF;
-       maxfrs |= (dev->data->dev_conf.rxmode.max_rx_pkt_len << 16);
+       maxfrs |= (frame_size << 16);
        IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
 
        return 0;
@@ -6078,12 +6072,10 @@ ixgbe_set_queue_rate_limit(struct rte_eth_dev *dev,
         * set as 0x4.
         */
        if ((rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
-           (rxmode->max_rx_pkt_len >= IXGBE_MAX_JUMBO_FRAME_SIZE))
-               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-                       IXGBE_MMW_SIZE_JUMBO_FRAME);
+           (dev->data->mtu + IXGBE_ETH_OVERHEAD >= IXGBE_MAX_JUMBO_FRAME_SIZE))
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_JUMBO_FRAME);
        else
-               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM,
-                       IXGBE_MMW_SIZE_DEFAULT);
+               IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRM, IXGBE_MMW_SIZE_DEFAULT);
 
        /* Set RTTBCNRC of queue X */
        IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_idx);
@@ -6355,8 +6347,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
        hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       if (mtu < RTE_ETHER_MIN_MTU ||
-                       max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
+       if (mtu < RTE_ETHER_MIN_MTU || max_frame > RTE_ETHER_MAX_JUMBO_FRAME_LEN)
                return -EINVAL;
 
        /* If device is started, refuse mtu that requires the support of
@@ -6364,7 +6355,7 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
         */
        if (dev_data->dev_started && !dev_data->scattered_rx &&
            (max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
-            dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
+                       dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) {
                PMD_INIT_LOG(ERR, "Stop port first.");
                return -EINVAL;
        }
@@ -6381,8 +6372,6 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        if (ixgbevf_rlpml_set_vf(hw, max_frame))
                return -EINVAL;
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
        return 0;
 }
 
index a1529b4d56595e632ec693c06995bf2730bffc07..4ceb5bf322d84860d2da49a7e61c0d0d9256c06a 100644 (file)
@@ -573,8 +573,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
                          * if PF has jumbo frames enabled which means legacy
                          * VFs are disabled.
                          */
-                       if (dev->data->dev_conf.rxmode.max_rx_pkt_len >
-                           IXGBE_ETH_MAX_LEN)
+                       if (dev->data->mtu > RTE_ETHER_MTU)
                                break;
                        /* fall through */
                default:
@@ -584,8 +583,7 @@ ixgbe_set_vf_lpe(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf)
                         * legacy VFs.
                         */
                        if (max_frame > IXGBE_ETH_MAX_LEN ||
-                           dev->data->dev_conf.rxmode.max_rx_pkt_len >
-                           IXGBE_ETH_MAX_LEN)
+                                       dev->data->mtu > RTE_ETHER_MTU)
                                return -1;
                        break;
                }
index 4d3d30b6622eb4384a2fff387fb23ad5a6cc6b83..575cc8c4ffe52fdcc083e5295f95256efb2701b6 100644 (file)
@@ -5047,6 +5047,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        uint16_t buf_size;
        uint16_t i;
        struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
+       uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
        int rc;
 
        PMD_INIT_FUNC_TRACE();
@@ -5082,7 +5083,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                hlreg0 |= IXGBE_HLREG0_JUMBOEN;
                maxfrs = IXGBE_READ_REG(hw, IXGBE_MAXFRS);
                maxfrs &= 0x0000FFFF;
-               maxfrs |= (rx_conf->max_rx_pkt_len << 16);
+               maxfrs |= (frame_size << 16);
                IXGBE_WRITE_REG(hw, IXGBE_MAXFRS, maxfrs);
        } else
                hlreg0 &= ~IXGBE_HLREG0_JUMBOEN;
@@ -5156,8 +5157,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                                       IXGBE_SRRCTL_BSIZEPKT_SHIFT);
 
                /* It adds dual VLAN length for supporting dual VLAN */
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                           2 * IXGBE_VLAN_TAG_SIZE > buf_size)
+               if (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > buf_size)
                        dev->data->scattered_rx = 1;
                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
@@ -5637,6 +5637,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
        struct ixgbe_hw     *hw;
        struct ixgbe_rx_queue *rxq;
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+       uint32_t frame_size = dev->data->mtu + IXGBE_ETH_OVERHEAD;
        uint64_t bus_addr;
        uint32_t srrctl, psrtype = 0;
        uint16_t buf_size;
@@ -5673,10 +5674,9 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
         * ixgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
         * VF packets received can work in all cases.
         */
-       if (ixgbevf_rlpml_set_vf(hw,
-           (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+       if (ixgbevf_rlpml_set_vf(hw, frame_size) != 0) {
                PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
-                            dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                            frame_size);
                return -EINVAL;
        }
 
@@ -5735,8 +5735,7 @@ ixgbevf_dev_rx_init(struct rte_eth_dev *dev)
 
                if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
                    /* It adds dual VLAN length for supporting dual VLAN */
-                   (rxmode->max_rx_pkt_len +
-                               2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
+                   (frame_size + 2 * IXGBE_VLAN_TAG_SIZE) > buf_size) {
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
                        dev->data->scattered_rx = 1;
index 41b3f63ac059e892fd06e073ced132925a9d61c9..3fac28dcfcf955e92f877915a91f0cb5a60a191d 100644 (file)
@@ -435,7 +435,6 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
 {
        struct lio_device *lio_dev = LIO_DEV(eth_dev);
        uint16_t pf_mtu = lio_dev->linfo.link.s.mtu;
-       uint32_t frame_len = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
        struct lio_dev_ctrl_cmd ctrl_cmd;
        struct lio_ctrl_pkt ctrl_pkt;
 
@@ -481,16 +480,13 @@ lio_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
                return -1;
        }
 
-       if (frame_len > LIO_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                eth_dev->data->dev_conf.rxmode.offloads |=
                        DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                eth_dev->data->dev_conf.rxmode.offloads &=
                        ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_len;
-       eth_dev->data->mtu = mtu;
-
        return 0;
 }
 
@@ -1402,8 +1398,6 @@ lio_sync_link_state_check(void *eth_dev)
 static int
 lio_dev_start(struct rte_eth_dev *eth_dev)
 {
-       uint16_t mtu;
-       uint32_t frame_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
        struct lio_device *lio_dev = LIO_DEV(eth_dev);
        uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
        int ret = 0;
@@ -1446,15 +1440,9 @@ lio_dev_start(struct rte_eth_dev *eth_dev)
                goto dev_mtu_set_error;
        }
 
-       mtu = (uint16_t)(frame_len - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN);
-       if (mtu < RTE_ETHER_MIN_MTU)
-               mtu = RTE_ETHER_MIN_MTU;
-
-       if (eth_dev->data->mtu != mtu) {
-               ret = lio_dev_mtu_set(eth_dev, mtu);
-               if (ret)
-                       goto dev_mtu_set_error;
-       }
+       ret = lio_dev_mtu_set(eth_dev, eth_dev->data->mtu);
+       if (ret != 0)
+               goto dev_mtu_set_error;
 
        return 0;
 
index 2b75c07fad755413dbc9637d000dfc6ca68a0107..1801d87334a179ffd1c985073dfbfcfc02e86cb8 100644 (file)
@@ -753,6 +753,7 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        int ret;
        uint32_t crc_present;
        uint64_t offloads;
+       uint32_t max_rx_pktlen;
 
        offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
 
@@ -829,13 +830,11 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        dev->data->rx_queues[idx] = rxq;
        /* Enable scattered packets support for this queue if necessary. */
        MLX4_ASSERT(mb_len >= RTE_PKTMBUF_HEADROOM);
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len <=
-           (mb_len - RTE_PKTMBUF_HEADROOM)) {
+       max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       if (max_rx_pktlen <= (mb_len - RTE_PKTMBUF_HEADROOM)) {
                ;
        } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
-               uint32_t size =
-                       RTE_PKTMBUF_HEADROOM +
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len;
+               uint32_t size = RTE_PKTMBUF_HEADROOM + max_rx_pktlen;
                uint32_t sges_n;
 
                /*
@@ -847,21 +846,19 @@ mlx4_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                /* Make sure sges_n did not overflow. */
                size = mb_len * (1 << rxq->sges_n);
                size -= RTE_PKTMBUF_HEADROOM;
-               if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) {
+               if (size < max_rx_pktlen) {
                        rte_errno = EOVERFLOW;
                        ERROR("%p: too many SGEs (%u) needed to handle"
                              " requested maximum packet size %u",
                              (void *)dev,
-                             1 << sges_n,
-                             dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                             1 << sges_n, max_rx_pktlen);
                        goto error;
                }
        } else {
                WARN("%p: the requested maximum Rx packet size (%u) is"
                     " larger than a single mbuf (%u) and scattered"
                     " mode has not been requested",
-                    (void *)dev,
-                    dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                    (void *)dev, max_rx_pktlen,
                     mb_len - RTE_PKTMBUF_HEADROOM);
        }
        DEBUG("%p: maximum number of segments per packet: %u",
index 247f36e5d7826cb38969c806525fe41d4ac6b47b..4f82de375d04204f4ba957ad24ed51bb79beb317 100644 (file)
@@ -1338,10 +1338,11 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        uint64_t offloads = conf->offloads |
                           dev->data->dev_conf.rxmode.offloads;
        unsigned int lro_on_queue = !!(offloads & DEV_RX_OFFLOAD_TCP_LRO);
-       unsigned int max_rx_pkt_len = lro_on_queue ?
+       unsigned int max_rx_pktlen = lro_on_queue ?
                        dev->data->dev_conf.rxmode.max_lro_pkt_size :
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len;
-       unsigned int non_scatter_min_mbuf_size = max_rx_pkt_len +
+                       dev->data->mtu + (unsigned int)RTE_ETHER_HDR_LEN +
+                               RTE_ETHER_CRC_LEN;
+       unsigned int non_scatter_min_mbuf_size = max_rx_pktlen +
                                                        RTE_PKTMBUF_HEADROOM;
        unsigned int max_lro_size = 0;
        unsigned int first_mb_free_size = mb_len - RTE_PKTMBUF_HEADROOM;
@@ -1380,7 +1381,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         * needed to handle max size packets, replace zero length
         * with the buffer length from the pool.
         */
-       tail_len = max_rx_pkt_len;
+       tail_len = max_rx_pktlen;
        do {
                struct mlx5_eth_rxseg *hw_seg =
                                        &tmpl->rxq.rxseg[tmpl->rxq.rxseg_n];
@@ -1418,7 +1419,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                "port %u too many SGEs (%u) needed to handle"
                                " requested maximum packet size %u, the maximum"
                                " supported are %u", dev->data->port_id,
-                               tmpl->rxq.rxseg_n, max_rx_pkt_len,
+                               tmpl->rxq.rxseg_n, max_rx_pktlen,
                                MLX5_MAX_RXQ_NSEG);
                        rte_errno = ENOTSUP;
                        goto error;
@@ -1443,7 +1444,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                DRV_LOG(ERR, "port %u Rx queue %u: Scatter offload is not"
                        " configured and no enough mbuf space(%u) to contain "
                        "the maximum RX packet length(%u) with head-room(%u)",
-                       dev->data->port_id, idx, mb_len, max_rx_pkt_len,
+                       dev->data->port_id, idx, mb_len, max_rx_pktlen,
                        RTE_PKTMBUF_HEADROOM);
                rte_errno = ENOSPC;
                goto error;
@@ -1464,7 +1465,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
         * following conditions are met:
         *  - MPRQ is enabled.
         *  - The number of descs is more than the number of strides.
-        *  - max_rx_pkt_len plus overhead is less than the max size
+        *  - max_rx_pktlen plus overhead is less than the max size
         *    of a stride or mprq_stride_size is specified by a user.
         *    Need to make sure that there are enough strides to encap
         *    the maximum packet size in case mprq_stride_size is set.
@@ -1488,7 +1489,7 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                !!(offloads & DEV_RX_OFFLOAD_SCATTER);
                tmpl->rxq.mprq_max_memcpy_len = RTE_MIN(first_mb_free_size,
                                config->mprq.max_memcpy_len);
-               max_lro_size = RTE_MIN(max_rx_pkt_len,
+               max_lro_size = RTE_MIN(max_rx_pktlen,
                                       (1u << tmpl->rxq.strd_num_n) *
                                       (1u << tmpl->rxq.strd_sz_n));
                DRV_LOG(DEBUG,
@@ -1497,9 +1498,9 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                        dev->data->port_id, idx,
                        tmpl->rxq.strd_num_n, tmpl->rxq.strd_sz_n);
        } else if (tmpl->rxq.rxseg_n == 1) {
-               MLX5_ASSERT(max_rx_pkt_len <= first_mb_free_size);
+               MLX5_ASSERT(max_rx_pktlen <= first_mb_free_size);
                tmpl->rxq.sges_n = 0;
-               max_lro_size = max_rx_pkt_len;
+               max_lro_size = max_rx_pktlen;
        } else if (offloads & DEV_RX_OFFLOAD_SCATTER) {
                unsigned int sges_n;
 
@@ -1521,13 +1522,13 @@ mlx5_rxq_new(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                                "port %u too many SGEs (%u) needed to handle"
                                " requested maximum packet size %u, the maximum"
                                " supported are %u", dev->data->port_id,
-                               1 << sges_n, max_rx_pkt_len,
+                               1 << sges_n, max_rx_pktlen,
                                1u << MLX5_MAX_LOG_RQ_SEGS);
                        rte_errno = ENOTSUP;
                        goto error;
                }
                tmpl->rxq.sges_n = sges_n;
-               max_lro_size = max_rx_pkt_len;
+               max_lro_size = max_rx_pktlen;
        }
        if (config->mprq.enabled && !mlx5_rxq_mprq_enabled(&tmpl->rxq))
                DRV_LOG(WARNING,
index 90f2466369164c692a86d2f704ac30189c21b78c..2a02880873571fad0743681c46e5b4154f9e22b1 100644 (file)
@@ -126,10 +126,6 @@ mvneta_dev_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                                MRVL_NETA_ETH_HDRS_LEN;
-
        if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
                priv->multiseg = 1;
 
@@ -261,9 +257,6 @@ mvneta_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       dev->data->mtu = mtu;
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
-
        if (!priv->ppio)
                /* It is OK. New MTU will be set later on mvneta_dev_start */
                return 0;
index 2d61930382cb57772a1e57bece6695d3fe9b3140..9836bb071a829df6acc64412c4d9dc5877f39b9b 100644 (file)
@@ -708,19 +708,18 @@ mvneta_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mvneta_priv *priv = dev->data->dev_private;
        struct mvneta_rxq *rxq;
        uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
-       uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
 
        frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MVNETA_PKT_EFFEC_OFFS;
 
-       if (frame_size < max_rx_pkt_len) {
+       if (frame_size < max_rx_pktlen) {
                MVNETA_LOG(ERR,
                        "Mbuf size must be increased to %u bytes to hold up "
                        "to %u bytes of data.",
-                       buf_size + max_rx_pkt_len - frame_size,
-                       max_rx_pkt_len);
-               dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-               MVNETA_LOG(INFO, "Setting max rx pkt len to %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                       max_rx_pktlen + buf_size - frame_size,
+                       max_rx_pktlen);
+               dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
+               MVNETA_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
        }
 
        if (dev->data->rx_queues[idx]) {
index 65d011300a978a49e9fb83f5cb039b3f54858a92..44761b695a8d7a061fd4edf5e96780243ca51bed 100644 (file)
@@ -496,16 +496,11 @@ mrvl_dev_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                                MRVL_PP2_ETH_HDRS_LEN;
-               if (dev->data->mtu > priv->max_mtu) {
-                       MRVL_LOG(ERR, "inherit MTU %u from max_rx_pkt_len %u is larger than max_mtu %u\n",
-                                dev->data->mtu,
-                                dev->data->dev_conf.rxmode.max_rx_pkt_len,
-                                priv->max_mtu);
-                       return -EINVAL;
-               }
+       if (dev->data->dev_conf.rxmode.mtu > priv->max_mtu) {
+               MRVL_LOG(ERR, "MTU %u is larger than max_mtu %u\n",
+                        dev->data->dev_conf.rxmode.mtu,
+                        priv->max_mtu);
+               return -EINVAL;
        }
 
        if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
@@ -595,9 +590,6 @@ mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       dev->data->mtu = mtu;
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
-
        if (!priv->ppio)
                return 0;
 
@@ -1994,7 +1986,7 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_rxq *rxq;
        uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
-       uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+       uint32_t max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
        int ret, tc, inq;
        uint64_t offloads;
 
@@ -2009,17 +2001,15 @@ mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                return -EFAULT;
        }
 
-       frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
-                    MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
-       if (frame_size < max_rx_pkt_len) {
+       frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
+       if (frame_size < max_rx_pktlen) {
                MRVL_LOG(WARNING,
                        "Mbuf size must be increased to %u bytes to hold up "
                        "to %u bytes of data.",
-                       buf_size + max_rx_pkt_len - frame_size,
-                       max_rx_pkt_len);
-               dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-               MRVL_LOG(INFO, "Setting max rx pkt len to %u",
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                       max_rx_pktlen + buf_size - frame_size,
+                       max_rx_pktlen);
+               dev->data->mtu = frame_size - RTE_ETHER_HDR_LEN;
+               MRVL_LOG(INFO, "Setting MTU to %u", dev->data->mtu);
        }
 
        if (dev->data->rx_queues[idx]) {
index 4395a09c597d5e3e36416de2d024abf38cfa930b..928b4983a07a388daf3fa306bc4f38764d5c237d 100644 (file)
@@ -370,7 +370,7 @@ nfp_check_offloads(struct rte_eth_dev *dev)
        }
 
        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               hw->mtu = rxmode->max_rx_pkt_len;
+               hw->mtu = dev->data->mtu;
 
        if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
                ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
@@ -963,16 +963,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
 
        /* switch to jumbo mode if needed */
-       if ((uint32_t)mtu > RTE_ETHER_MTU)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;
-
        /* writing to configuration space */
-       nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);
+       nn_cfg_writel(hw, NFP_NET_CFG_MTU, mtu);
 
        hw->mtu = mtu;
 
index 096b5f6ae3da545ef68888d593c2c5fe652ebd43..67c7d8929eb28332496ee0c4bf21f24e961c67a7 100644 (file)
@@ -552,13 +552,11 @@ octeontx_dev_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
        if (rc)
                return rc;
 
-       if (frame_size > OCCTX_L2_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                nic->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                nic->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       /* Update max_rx_pkt_len */
-       data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
        octeontx_log_info("Received pkt beyond  maxlen %d will be dropped",
                          frame_size);
 
@@ -581,7 +579,7 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
        buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
        /* Setup scatter mode if needed by jumbo */
-       if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+       if (data->mtu > buffsz) {
                nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
                nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev);
                nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev);
@@ -593,8 +591,8 @@ octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq)
        evdev_priv->rx_offload_flags = nic->rx_offload_flags;
        evdev_priv->tx_offload_flags = nic->tx_offload_flags;
 
-       /* Setup MTU based on max_rx_pkt_len */
-       nic->mtu = data->dev_conf.rxmode.max_rx_pkt_len - OCCTX_L2_OVERHEAD;
+       /* Setup MTU */
+       nic->mtu = data->mtu;
 
        return 0;
 }
@@ -615,7 +613,7 @@ octeontx_dev_start(struct rte_eth_dev *dev)
                octeontx_recheck_rx_offloads(rxq);
        }
 
-       /* Setting up the mtu based on max_rx_pkt_len */
+       /* Setting up the mtu */
        ret = octeontx_dev_mtu_set(dev, nic->mtu);
        if (ret) {
                octeontx_log_err("Failed to set default MTU size %d", ret);
index 69266e65141b0c0f053ed2aeb4f492a8fdfe8d0d..f491e20e95c162117c10ef3cfdf697e5e6eff3f4 100644 (file)
@@ -913,7 +913,7 @@ otx2_nix_enable_mseg_on_jumbo(struct otx2_eth_rxq *rxq)
        mbp_priv = rte_mempool_get_priv(rxq->pool);
        buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
 
-       if (eth_dev->data->dev_conf.rxmode.max_rx_pkt_len > buffsz) {
+       if (eth_dev->data->mtu + (uint32_t)NIX_L2_OVERHEAD > buffsz) {
                dev->rx_offloads |= DEV_RX_OFFLOAD_SCATTER;
                dev->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
 
index 3a763f691ba4d76d9c7e8e6bb412702433446330..3c591c8fbaa09ed5c1cc17cedce6cd2b57697f69 100644 (file)
@@ -59,14 +59,11 @@ otx2_nix_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
        if (rc)
                return rc;
 
-       if (frame_size > NIX_L2_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       /* Update max_rx_pkt_len */
-       data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
-
        return rc;
 }
 
@@ -75,7 +72,6 @@ otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
 {
        struct rte_eth_dev_data *data = eth_dev->data;
        struct otx2_eth_rxq *rxq;
-       uint16_t mtu;
        int rc;
 
        rxq = data->rx_queues[0];
@@ -83,10 +79,7 @@ otx2_nix_recalc_mtu(struct rte_eth_dev *eth_dev)
        /* Setup scatter mode if needed by jumbo */
        otx2_nix_enable_mseg_on_jumbo(rxq);
 
-       /* Setup MTU based on max_rx_pkt_len */
-       mtu = data->dev_conf.rxmode.max_rx_pkt_len - NIX_L2_OVERHEAD;
-
-       rc = otx2_nix_mtu_set(eth_dev, mtu);
+       rc = otx2_nix_mtu_set(eth_dev, data->mtu);
        if (rc)
                otx2_err("Failed to set default MTU size %d", rc);
 
index d6a69449073b14e6415fed7415f6bb09aab2e100..4cc002ee8fab99dbf9e06399b3fdef4391441dbb 100644 (file)
@@ -670,16 +670,11 @@ pfe_link_up(struct rte_eth_dev *dev)
 static int
 pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
-       int ret;
        struct pfe_eth_priv_s *priv = dev->data->dev_private;
        uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
 
        /*TODO Support VLAN*/
-       ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
-       if (!ret)
-               dev->data->mtu = mtu;
-
-       return ret;
+       return gemac_set_rx(priv->EMAC_baseaddr, frame_size);
 }
 
 /* pfe_eth_enet_addr_byte_mac
index fd8c62a1826b7a0772f02bbcc614a31654c2fa49..a1cf913dc8ed49f9f5750b3ec1b2c03a6883e116 100644 (file)
@@ -1312,12 +1312,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
                        return -ENOMEM;
        }
 
-       /* If jumbo enabled adjust MTU */
-       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               eth_dev->data->mtu =
-                       eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                       RTE_ETHER_HDR_LEN - QEDE_ETH_OVERHEAD;
-
        if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER)
                eth_dev->data->scattered_rx = 1;
 
@@ -2315,7 +2309,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_dev_info dev_info = {0};
        struct qede_fastpath *fp;
-       uint32_t max_rx_pkt_len;
        uint32_t frame_size;
        uint16_t bufsz;
        bool restart = false;
@@ -2327,8 +2320,8 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                DP_ERR(edev, "Error during getting ethernet device info\n");
                return rc;
        }
-       max_rx_pkt_len = mtu + QEDE_MAX_ETHER_HDR_LEN;
-       frame_size = max_rx_pkt_len;
+
+       frame_size = mtu + QEDE_MAX_ETHER_HDR_LEN;
        if (mtu < RTE_ETHER_MIN_MTU || frame_size > dev_info.max_rx_pktlen) {
                DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
                       mtu, dev_info.max_rx_pktlen - RTE_ETHER_HDR_LEN -
@@ -2368,7 +2361,7 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                        fp->rxq->rx_buf_size = rc;
                }
        }
-       if (frame_size > QEDE_ETH_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -2378,9 +2371,6 @@ static int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                dev->data->dev_started = 1;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len;
-
        return 0;
 }
 
index 35cde561ba59c62a69aefface67ac3afbaf570cb..c2263787b4ec15e0212b49546fff3f63366a067b 100644 (file)
@@ -224,7 +224,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct qede_rx_queue *rxq;
-       uint16_t max_rx_pkt_len;
+       uint16_t max_rx_pktlen;
        uint16_t bufsz;
        int rc;
 
@@ -243,21 +243,21 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qid,
                dev->data->rx_queues[qid] = NULL;
        }
 
-       max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
+       max_rx_pktlen = dev->data->mtu + RTE_ETHER_HDR_LEN;
 
        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        /* cache align the mbuf size to simplfy rx_buf_size calculation */
        bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
        if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
-           (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
+           (max_rx_pktlen + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
                        dev->data->scattered_rx = 1;
                }
        }
 
-       rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
+       rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pktlen);
        if (rc < 0)
                return rc;
 
index 8ec56a9ed57dbdbd2fe00b4c84b2a42257eb64d2..d3b12675e5bf64392ebf12b8a5998ddd334e355a 100644 (file)
@@ -1142,15 +1142,13 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 
        /*
         * The driver does not use it, but other PMDs update jumbo frame
-        * flag and max_rx_pkt_len when MTU is set.
+        * flag when MTU is set.
         */
        if (mtu > RTE_ETHER_MTU) {
                struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
                rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        }
 
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
-
        sfc_adapter_unlock(sa);
 
        sfc_log_init(sa, "done");
index 7a3f59a1123da1683ace15f3b7ef540f5d7fbca4..5320d8903dacf5ba28152f6be6b8f4043170eee2 100644 (file)
@@ -383,14 +383,10 @@ sfc_port_configure(struct sfc_adapter *sa)
 {
        const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
        struct sfc_port *port = &sa->port;
-       const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 
        sfc_log_init(sa, "entry");
 
-       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               port->pdu = rxmode->max_rx_pkt_len;
-       else
-               port->pdu = EFX_MAC_PDU(dev_data->mtu);
+       port->pdu = EFX_MAC_PDU(dev_data->mtu);
 
        return 0;
 }
index 046f17669d03d16b93e1d8f3d0ec9fd9c426fa68..e4f1ad45219e6c00081803bf84a0a79d4fa4614b 100644 (file)
@@ -1627,13 +1627,8 @@ tap_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 {
        struct pmd_internals *pmd = dev->data->dev_private;
        struct ifreq ifr = { .ifr_mtu = mtu };
-       int err = 0;
 
-       err = tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
-       if (!err)
-               dev->data->mtu = mtu;
-
-       return err;
+       return tap_ioctl(pmd, SIOCSIFMTU, &ifr, 1, LOCAL_AND_REMOTE);
 }
 
 static int
index b08701bce752317d7918f3037b1a5b5e59e0ac77..80f51c0d9f6ce165237dbee49c060596a94c36d1 100644 (file)
@@ -176,7 +176,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS))
                return -EINVAL;
 
-       if (frame_size > NIC_HW_L2_MAX_LEN)
+       if (mtu > RTE_ETHER_MTU)
                rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
@@ -184,8 +184,6 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        if (nicvf_mbox_update_hw_max_frs(nic, mtu))
                return -EINVAL;
 
-       /* Update max_rx_pkt_len */
-       rxmode->max_rx_pkt_len = mtu + RTE_ETHER_HDR_LEN;
        nic->mtu = mtu;
 
        for (i = 0; i < nic->sqs_count; i++)
@@ -1723,16 +1721,13 @@ nicvf_dev_start(struct rte_eth_dev *dev)
        }
 
        /* Setup scatter mode if needed by jumbo */
-       if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                           2 * VLAN_TAG_SIZE > buffsz)
+       if (dev->data->mtu + (uint32_t)NIC_HW_L2_OVERHEAD + 2 * VLAN_TAG_SIZE > buffsz)
                dev->data->scattered_rx = 1;
        if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
                dev->data->scattered_rx = 1;
 
-       /* Setup MTU based on max_rx_pkt_len or default */
-       mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
-               dev->data->dev_conf.rxmode.max_rx_pkt_len
-                       -  RTE_ETHER_HDR_LEN : RTE_ETHER_MTU;
+       /* Setup MTU */
+       mtu = dev->data->mtu;
 
        if (nicvf_dev_set_mtu(dev, mtu)) {
                PMD_INIT_LOG(ERR, "Failed to set default mtu size");
index dc822d69f742b9057e5f303f01e6eeaf71a90fab..45afe872bde0c61085ab8431a04650ec078afcca 100644 (file)
@@ -3482,8 +3482,11 @@ txgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
        }
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
+       /* switch to jumbo mode if needed */
+       if (mtu > RTE_ETHER_MTU)
+               dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       else
+               dev->data->dev_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        if (hw->mode)
                wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
index 528f11439bbdb30ef0cd8ed3a155f759a52bf128..fd65d89ffe7d868e54ecc00fa51eff0cbb934b27 100644 (file)
 #define TXGBE_5TUPLE_MAX_PRI            7
 #define TXGBE_5TUPLE_MIN_PRI            1
 
+
+/* The overhead from MTU to max frame size. */
+#define TXGBE_ETH_OVERHEAD (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN)
+
 #define TXGBE_RSS_OFFLOAD_ALL ( \
        ETH_RSS_IPV4 | \
        ETH_RSS_NONFRAG_IPV4_TCP | \
index 896da8a88770a714ecb46fff91b419e22ff13f61..43dc0ed39b753dcf3c55947f5895674cdb42f9d7 100644 (file)
@@ -1128,8 +1128,6 @@ txgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        if (txgbevf_rlpml_set_vf(hw, max_frame))
                return -EINVAL;
 
-       /* update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame;
        return 0;
 }
 
index 596142378ad955347197200e7d3b14ff492053cc..5cd6ecc2a3994cf73eafeb193fee0da1a752d900 100644 (file)
@@ -4326,13 +4326,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
        /*
         * Configure jumbo frame support, if any.
         */
-       if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
-                       TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
-       } else {
-               wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
-                       TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
-       }
+       wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
+               TXGBE_FRMSZ_MAX(dev->data->mtu + TXGBE_ETH_OVERHEAD));
 
        /*
         * If loopback mode is configured, set LPBK bit.
@@ -4394,8 +4389,8 @@ txgbe_dev_rx_init(struct rte_eth_dev *dev)
                wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
 
                /* It adds dual VLAN length for supporting dual VLAN */
-               if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
-                                           2 * TXGBE_VLAN_TAG_SIZE > buf_size)
+               if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
+                               2 * TXGBE_VLAN_TAG_SIZE > buf_size)
                        dev->data->scattered_rx = 1;
                if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
@@ -4847,9 +4842,9 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
         * VF packets received can work in all cases.
         */
        if (txgbevf_rlpml_set_vf(hw,
-           (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len)) {
+           (uint16_t)dev->data->mtu + TXGBE_ETH_OVERHEAD)) {
                PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
-                            dev->data->dev_conf.rxmode.max_rx_pkt_len);
+                            dev->data->mtu + TXGBE_ETH_OVERHEAD);
                return -EINVAL;
        }
 
@@ -4911,7 +4906,7 @@ txgbevf_dev_rx_init(struct rte_eth_dev *dev)
 
                if (rxmode->offloads & DEV_RX_OFFLOAD_SCATTER ||
                    /* It adds dual VLAN length for supporting dual VLAN */
-                   (rxmode->max_rx_pkt_len +
+                   (dev->data->mtu + TXGBE_ETH_OVERHEAD +
                                2 * TXGBE_VLAN_TAG_SIZE) > buf_size) {
                        if (!dev->data->scattered_rx)
                                PMD_INIT_LOG(DEBUG, "forcing scatter mode");
index aff791fbd0c06ed220f66bd7ebeffb6f84d9e63d..a28f9607277e77f651d6564f4eca1d81864d9641 100644 (file)
@@ -924,7 +924,6 @@ virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
 
        hw->max_rx_pkt_len = frame_size;
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = hw->max_rx_pkt_len;
 
        return 0;
 }
@@ -2107,14 +2106,10 @@ virtio_dev_configure(struct rte_eth_dev *dev)
                        return ret;
        }
 
-       if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) &&
-           (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len))
+       if (rxmode->mtu > hw->max_mtu)
                req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
 
-       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
-               hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
-       else
-               hw->max_rx_pkt_len = ether_hdr_len + dev->data->mtu;
+       hw->max_rx_pkt_len = ether_hdr_len + rxmode->mtu;
 
        if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
                           DEV_RX_OFFLOAD_TCP_CKSUM))
index 95fd0b814494e4b4ad39c029c7639ca3f2a0685b..a26076b312e57b33377d0a0f4a52f15f89a8e2c2 100644 (file)
@@ -71,7 +71,6 @@ mbuf_input(struct rte_mbuf *mbuf)
 static const struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index ed39b7237fbb23669c3912fd2d2f6e198d1bc3f2..fd8fd767c8110fe02a9c5714f1020dc31a712c30 100644 (file)
@@ -115,7 +115,6 @@ static struct rte_mempool *mbuf_pool;
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .rx_adv_conf = {
index d0f40a1fb4bc7224646905b5e31e412d0701a394..8c4a8feec0c2d19c77c53808b33ff640065f76be 100644 (file)
@@ -81,7 +81,6 @@ struct app_stats prev_app_stats;
 static const struct rte_eth_conf port_conf_default = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
        },
        .txmode = {
                .mq_mode = ETH_MQ_TX_NONE,
index 5ed0dc73ec6010c0fe0df408b605363a05efce9d..e26be8edf28fb97aec2f18317f99ff4fefa9a43f 100644 (file)
@@ -284,7 +284,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
        static const struct rte_eth_conf port_conf_default = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                },
                .rx_adv_conf = {
                        .rss_conf = {
index ab8c6d6a0dad36445856002593f10c57fe70e8dc..476b147bdfcc31a2cb5abe6319f962ae739619a3 100644 (file)
@@ -615,7 +615,6 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
        static const struct rte_eth_conf port_conf_default = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                },
                .rx_adv_conf = {
                        .rss_conf = {
index 65c1d85cf2fb0ed61a6b6ffd6c687d67598f193a..8a43f6ac0f92bd82a8ff423f76a7acdf4f5b2f88 100644 (file)
@@ -59,14 +59,6 @@ static struct{
 } parm_config;
 const char cb_port_delim[] = ":";
 
-/* Ethernet ports configured with default settings using struct. 8< */
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
-       },
-};
-/* >8 End of configuration of Ethernet ports. */
-
 /* Creation of flow classifier object. 8< */
 struct flow_classifier {
        struct rte_flow_classifier *cls;
@@ -200,7 +192,7 @@ static struct rte_flow_attr attr;
 static inline int
 port_init(uint8_t port, struct rte_mempool *mbuf_pool)
 {
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        struct rte_ether_addr addr;
        const uint16_t rx_rings = 1, tx_rings = 1;
        int retval;
@@ -211,6 +203,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
index ff36aa7f1e7b5a1785dbe6732b4f235a4110a3a7..ccfee585f850de0501b3f893cff31244b7752b13 100644 (file)
@@ -820,7 +820,6 @@ port_init(uint16_t portid, struct rte_mempool *mbuf_pool, uint16_t nb_queues)
        static const struct rte_eth_conf port_conf = {
                .rxmode = {
                        .mq_mode = ETH_MQ_RX_RSS,
-                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN
                },
                .rx_adv_conf = {
                        .rss_conf = {
index cc338f4269b09c069b780d7a2ed7ab8fc53135b4..030641241fec0711754ce35a428e73392937da06 100644 (file)
@@ -145,7 +145,8 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
        .rxmode = {
-               .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+               .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+                       RTE_ETHER_CRC_LEN,
                .split_hdr_size = 0,
                .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
                             DEV_RX_OFFLOAD_SCATTER |
@@ -917,9 +918,9 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
-               local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
-                   dev_info.max_rx_pktlen,
-                   local_port_conf.rxmode.max_rx_pkt_len);
+               local_port_conf.rxmode.mtu = RTE_MIN(
+                   dev_info.max_mtu,
+                   local_port_conf.rxmode.mtu);
 
                /* get the lcore_id for this port */
                while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
@@ -962,8 +963,7 @@ main(int argc, char **argv)
                }
 
                /* set the mtu to the maximum received packet size */
-               ret = rte_eth_dev_set_mtu(portid,
-                       local_port_conf.rxmode.max_rx_pkt_len - MTU_OVERHEAD);
+               ret = rte_eth_dev_set_mtu(portid, local_port_conf.rxmode.mtu);
                if (ret < 0) {
                        printf("\n");
                        rte_exit(EXIT_FAILURE, "Set MTU failed: "
index 16bcffe356bcb2a06288484168403ec4f35260c5..9ba02e687adb0ab33fd5e3e8d1bda47420ef6ca2 100644 (file)
@@ -46,7 +46,7 @@ static struct rte_eth_conf port_conf_default = {
        .link_speeds = 0,
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+               .mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
                .split_hdr_size = 0, /* Header split buffer size */
        },
        .rx_adv_conf = {
index f513e2f879374d8b5869068a819af4dbe4d87ef8..f1ea3a7a69ce361e4a0d22314281bb08fe6ea124 100644 (file)
@@ -161,7 +161,8 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+               .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+                       RTE_ETHER_CRC_LEN,
                .split_hdr_size = 0,
                .offloads = (DEV_RX_OFFLOAD_CHECKSUM |
                             DEV_RX_OFFLOAD_JUMBO_FRAME),
@@ -881,7 +882,8 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
 
        /* mbufs stored int the gragment table. 8< */
        nb_mbuf = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST) * MAX_FRAG_NUM;
-       nb_mbuf *= (port_conf.rxmode.max_rx_pkt_len + BUF_SIZE - 1) / BUF_SIZE;
+       nb_mbuf *= (port_conf.rxmode.mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN
+                       + BUF_SIZE - 1) / BUF_SIZE;
        nb_mbuf *= 2; /* ipv4 and ipv6 */
        nb_mbuf += nb_rxd + nb_txd;
 
@@ -1053,9 +1055,9 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
-               local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
-                   dev_info.max_rx_pktlen,
-                   local_port_conf.rxmode.max_rx_pkt_len);
+               local_port_conf.rxmode.mtu = RTE_MIN(
+                   dev_info.max_mtu,
+                   local_port_conf.rxmode.mtu);
 
                /* get the lcore_id for this port */
                while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
index 744bb612d66b0590a86f5735021378035c75833c..5d54f5fa5076ffa2d0799c832f16c50d990ea70d 100644 (file)
@@ -234,7 +234,6 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
@@ -2152,7 +2151,6 @@ cryptodevs_init(uint16_t req_queue_num)
 static void
 port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
 {
-       uint32_t frame_size;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_txconf *txconf;
        uint16_t nb_tx_queue, nb_rx_queue;
@@ -2200,10 +2198,9 @@ port_init(uint16_t portid, uint64_t req_rx_offloads, uint64_t req_tx_offloads)
        printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
                        nb_rx_queue, nb_tx_queue);
 
-       frame_size = MTU_TO_FRAMELEN(mtu_size);
-       if (frame_size > local_port_conf.rxmode.max_rx_pkt_len)
+       if (mtu_size > RTE_ETHER_MTU)
                local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-       local_port_conf.rxmode.max_rx_pkt_len = frame_size;
+       local_port_conf.rxmode.mtu = mtu_size;
 
        if (multi_seg_required()) {
                local_port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
index 818843c8b6c8c7251d1aa69ab09ae0cd5b90853e..eda7bd573d4d829a7924b86a31a9ae76fdefb104 100644 (file)
@@ -109,7 +109,8 @@ static struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 
 static struct rte_eth_conf port_conf = {
        .rxmode = {
-               .max_rx_pkt_len = JUMBO_FRAME_MAX_SIZE,
+               .mtu = JUMBO_FRAME_MAX_SIZE - RTE_ETHER_HDR_LEN -
+                       RTE_ETHER_CRC_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_JUMBO_FRAME,
        },
@@ -714,9 +715,9 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
-               local_port_conf.rxmode.max_rx_pkt_len = RTE_MIN(
-                   dev_info.max_rx_pktlen,
-                   local_port_conf.rxmode.max_rx_pkt_len);
+               local_port_conf.rxmode.mtu = RTE_MIN(
+                   dev_info.max_mtu,
+                   local_port_conf.rxmode.mtu);
 
                /* get the lcore_id for this port */
                while (rte_lcore_is_enabled(rx_lcore_id) == 0 ||
index 4d049d844e0bd04c35d58e850562046829b55560..26d67ccbb1b129a1e8465406a51953e5fb653dc3 100644 (file)
@@ -790,14 +790,12 @@ kni_change_mtu_(uint16_t port_id, unsigned int new_mtu)
 
        memcpy(&conf, &port_conf, sizeof(conf));
        /* Set new MTU */
-       if (new_mtu > RTE_ETHER_MAX_LEN)
+       if (new_mtu > RTE_ETHER_MTU)
                conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
                conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
-       /* mtu + length of header + length of FCS = max pkt length */
-       conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
-                                                       KNI_ENET_FCS_SIZE;
+       conf.rxmode.mtu = new_mtu;
        ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
        if (ret < 0) {
                RTE_LOG(ERR, APP, "Fail to reconfigure port %d\n", port_id);
index 9b3e324efb230d656bab14de7007dc25ec92fc33..d9cf00c9dfc79f68433a9364e500fd61a4cd27c9 100644 (file)
 #define MBUF_CACHE_SIZE 250
 #define BURST_SIZE 32
 
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = { .max_rx_pkt_len = RTE_ETHER_MAX_LEN }
-};
-
 /* l2fwd-cat.c: CAT enabled, basic DPDK skeleton forwarding example. */
 
 /*
@@ -32,7 +28,7 @@ static const struct rte_eth_conf port_conf_default = {
 static inline int
 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1, tx_rings = 1;
        int retval;
        uint16_t q;
@@ -42,6 +38,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        /* Configure the Ethernet device. */
        retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
        if (retval != 0)
index 256a29bdf096d57deb5f3d98593790f553a40a66..6e2016752fca7c7aec0f66ef26a06847a2e80f46 100644 (file)
@@ -216,7 +216,6 @@ struct lcore_queue_conf lcore_queue_conf[RTE_MAX_LCORE];
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index 19f32809aa9d6e72238f09b0b1828c5e0efdab20..9040be5ed9b6b1cf2f49b8b2e0b729044112fb2e 100644 (file)
@@ -11,7 +11,6 @@ l2fwd_event_init_ports(struct l2fwd_resources *rsrc)
        uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
        struct rte_eth_conf port_conf = {
                .rxmode = {
-                       .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                        .split_hdr_size = 0,
                },
                .txmode = {
index add5e3c7c4593c08f6c3acb364bcbbacff4734a6..2205063c2e7a72cdb12cc1582d15018f9e082197 100644 (file)
@@ -124,7 +124,6 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
@@ -140,6 +139,8 @@ static struct rte_eth_conf port_conf = {
        },
 };
 
+static uint32_t max_pkt_len;
+
 static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 
 /* ethernet addresses of ports */
@@ -200,8 +201,8 @@ enum {
        OPT_CONFIG_NUM = 256,
 #define OPT_NONUMA      "no-numa"
        OPT_NONUMA_NUM,
-#define OPT_ENBJMO      "enable-jumbo"
-       OPT_ENBJMO_NUM,
+#define OPT_MAX_PKT_LEN "max-pkt-len"
+       OPT_MAX_PKT_LEN_NUM,
 #define OPT_RULE_IPV4   "rule_ipv4"
        OPT_RULE_IPV4_NUM,
 #define OPT_RULE_IPV6  "rule_ipv6"
@@ -1619,26 +1620,21 @@ print_usage(const char *prgname)
 
        usage_acl_alg(alg, sizeof(alg));
        printf("%s [EAL options] -- -p PORTMASK -P"
-               "--"OPT_RULE_IPV4"=FILE"
-               "--"OPT_RULE_IPV6"=FILE"
+               "  --"OPT_RULE_IPV4"=FILE"
+               "  --"OPT_RULE_IPV6"=FILE"
                "  [--"OPT_CONFIG" (port,queue,lcore)[,(port,queue,lcore]]"
-               "  [--"OPT_ENBJMO" [--max-pkt-len PKTLEN]]\n"
+               "  [--"OPT_MAX_PKT_LEN" PKTLEN]\n"
                "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
-               "  -P : enable promiscuous mode\n"
-               "  --"OPT_CONFIG": (port,queue,lcore): "
-               "rx queues configuration\n"
+               "  -P: enable promiscuous mode\n"
+               "  --"OPT_CONFIG" (port,queue,lcore): rx queues configuration\n"
                "  --"OPT_NONUMA": optional, disable numa awareness\n"
-               "  --"OPT_ENBJMO": enable jumbo frame"
-               " which max packet len is PKTLEN in decimal (64-9600)\n"
-               "  --"OPT_RULE_IPV4"=FILE: specify the ipv4 rules entries "
-               "file. "
+               "  --"OPT_MAX_PKT_LEN" PKTLEN: maximum packet length in decimal (64-9600)\n"
+               "  --"OPT_RULE_IPV4"=FILE: specify the ipv4 rules entries file. "
                "Each rule occupy one line. "
                "2 kinds of rules are supported. "
                "One is ACL entry at while line leads with character '%c', "
-               "another is route entry at while line leads with "
-               "character '%c'.\n"
-               "  --"OPT_RULE_IPV6"=FILE: specify the ipv6 rules "
-               "entries file.\n"
+               "another is route entry at while line leads with character '%c'.\n"
+               "  --"OPT_RULE_IPV6"=FILE: specify the ipv6 rules entries file.\n"
                "  --"OPT_ALG": ACL classify method to use, one of: %s\n",
                prgname, ACL_LEAD_CHAR, ROUTE_LEAD_CHAR, alg);
 }
@@ -1759,14 +1755,14 @@ parse_args(int argc, char **argv)
        int option_index;
        char *prgname = argv[0];
        static struct option lgopts[] = {
-               {OPT_CONFIG,    1, NULL, OPT_CONFIG_NUM    },
-               {OPT_NONUMA,    0, NULL, OPT_NONUMA_NUM    },
-               {OPT_ENBJMO,    0, NULL, OPT_ENBJMO_NUM    },
-               {OPT_RULE_IPV4, 1, NULL, OPT_RULE_IPV4_NUM },
-               {OPT_RULE_IPV6, 1, NULL, OPT_RULE_IPV6_NUM },
-               {OPT_ALG,       1, NULL, OPT_ALG_NUM       },
-               {OPT_ETH_DEST,  1, NULL, OPT_ETH_DEST_NUM  },
-               {NULL,          0, 0,    0                 }
+               {OPT_CONFIG,      1, NULL, OPT_CONFIG_NUM      },
+               {OPT_NONUMA,      0, NULL, OPT_NONUMA_NUM      },
+               {OPT_MAX_PKT_LEN, 1, NULL, OPT_MAX_PKT_LEN_NUM },
+               {OPT_RULE_IPV4,   1, NULL, OPT_RULE_IPV4_NUM   },
+               {OPT_RULE_IPV6,   1, NULL, OPT_RULE_IPV6_NUM   },
+               {OPT_ALG,         1, NULL, OPT_ALG_NUM         },
+               {OPT_ETH_DEST,    1, NULL, OPT_ETH_DEST_NUM    },
+               {NULL,            0, 0,    0                   }
        };
 
        argvopt = argv;
@@ -1805,43 +1801,11 @@ parse_args(int argc, char **argv)
                        numa_on = 0;
                        break;
 
-               case OPT_ENBJMO_NUM:
-               {
-                       struct option lenopts = {
-                               "max-pkt-len",
-                               required_argument,
-                               0,
-                               0
-                       };
-
-                       printf("jumbo frame is enabled\n");
-                       port_conf.rxmode.offloads |=
-                                       DEV_RX_OFFLOAD_JUMBO_FRAME;
-                       port_conf.txmode.offloads |=
-                                       DEV_TX_OFFLOAD_MULTI_SEGS;
-
-                       /*
-                        * if no max-pkt-len set, then use the
-                        * default value RTE_ETHER_MAX_LEN
-                        */
-                       if (getopt_long(argc, argvopt, "",
-                                       &lenopts, &option_index) == 0) {
-                               ret = parse_max_pkt_len(optarg);
-                               if ((ret < 64) ||
-                                       (ret > MAX_JUMBO_PKT_LEN)) {
-                                       printf("invalid packet "
-                                               "length\n");
-                                       print_usage(prgname);
-                                       return -1;
-                               }
-                               port_conf.rxmode.max_rx_pkt_len = ret;
-                       }
-                       printf("set jumbo frame max packet length "
-                               "to %u\n",
-                               (unsigned int)
-                               port_conf.rxmode.max_rx_pkt_len);
+               case OPT_MAX_PKT_LEN_NUM:
+                       printf("Custom frame size is configured\n");
+                       max_pkt_len = parse_max_pkt_len(optarg);
                        break;
-               }
+
                case OPT_RULE_IPV4_NUM:
                        parm_config.rule_ipv4_name = optarg;
                        break;
@@ -2009,6 +1973,43 @@ set_default_dest_mac(void)
        }
 }
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+               struct rte_eth_dev_info *dev_info)
+{
+       uint32_t overhead_len;
+
+       if (max_pkt_len == 0)
+               return 0;
+
+       if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+               return -1;
+
+       overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+                       dev_info->max_mtu);
+       conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+       if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+               conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       return 0;
+}
+
 int
 main(int argc, char **argv)
 {
@@ -2082,6 +2083,12 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
+               ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE,
+                               "Invalid max packet length: %u (port %u)\n",
+                               max_pkt_len, portid);
+
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
index a0de8ca9b42dd5411e94fc052484d229e7f39995..46568eba9c01b02f47b9b83caa2bd69f72894832 100644 (file)
@@ -112,7 +112,6 @@ static uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .rx_adv_conf = {
@@ -126,6 +125,8 @@ static struct rte_eth_conf port_conf = {
        },
 };
 
+static uint32_t max_pkt_len;
+
 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
 
 static struct rte_node_ethdev_config ethdev_conf[RTE_MAX_ETHPORTS];
@@ -259,7 +260,7 @@ print_usage(const char *prgname)
                " [-P]"
                " --config (port,queue,lcore)[,(port,queue,lcore)]"
                " [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
-               " [--enable-jumbo [--max-pkt-len PKTLEN]]"
+               " [--max-pkt-len PKTLEN]"
                " [--no-numa]"
                " [--per-port-pool]\n\n"
 
@@ -268,9 +269,7 @@ print_usage(const char *prgname)
                "  --config (port,queue,lcore): Rx queue configuration\n"
                "  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for "
                "port X\n"
-               "  --enable-jumbo: Enable jumbo frames\n"
-               "  --max-pkt-len: Under the premise of enabling jumbo,\n"
-               "                 maximum packet length in decimal (64-9600)\n"
+               "  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
                "  --no-numa: Disable numa awareness\n"
                "  --per-port-pool: Use separate buffer pool per port\n\n",
                prgname);
@@ -404,7 +403,7 @@ static const char short_options[] = "p:" /* portmask */
 #define CMD_LINE_OPT_CONFIG       "config"
 #define CMD_LINE_OPT_ETH_DEST     "eth-dest"
 #define CMD_LINE_OPT_NO_NUMA      "no-numa"
-#define CMD_LINE_OPT_ENABLE_JUMBO  "enable-jumbo"
+#define CMD_LINE_OPT_MAX_PKT_LEN   "max-pkt-len"
 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
 enum {
        /* Long options mapped to a short option */
@@ -416,7 +415,7 @@ enum {
        CMD_LINE_OPT_CONFIG_NUM,
        CMD_LINE_OPT_ETH_DEST_NUM,
        CMD_LINE_OPT_NO_NUMA_NUM,
-       CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+       CMD_LINE_OPT_MAX_PKT_LEN_NUM,
        CMD_LINE_OPT_PARSE_PER_PORT_POOL,
 };
 
@@ -424,7 +423,7 @@ static const struct option lgopts[] = {
        {CMD_LINE_OPT_CONFIG, 1, 0, CMD_LINE_OPT_CONFIG_NUM},
        {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
        {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
-       {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+       {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
        {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
        {NULL, 0, 0, 0},
 };
@@ -490,28 +489,8 @@ parse_args(int argc, char **argv)
                        numa_on = 0;
                        break;
 
-               case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
-                       const struct option lenopts = {"max-pkt-len",
-                                                      required_argument, 0, 0};
-
-                       port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-                       port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
-                       /*
-                        * if no max-pkt-len set, use the default
-                        * value RTE_ETHER_MAX_LEN.
-                        */
-                       if (getopt_long(argc, argvopt, "", &lenopts,
-                                       &option_index) == 0) {
-                               ret = parse_max_pkt_len(optarg);
-                               if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
-                                       fprintf(stderr, "Invalid maximum "
-                                                       "packet length\n");
-                                       print_usage(prgname);
-                                       return -1;
-                               }
-                               port_conf.rxmode.max_rx_pkt_len = ret;
-                       }
+               case CMD_LINE_OPT_MAX_PKT_LEN_NUM: {
+                       max_pkt_len = parse_max_pkt_len(optarg);
                        break;
                }
 
@@ -722,6 +701,43 @@ graph_main_loop(void *conf)
 }
 /* >8 End of main processing loop. */
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+               struct rte_eth_dev_info *dev_info)
+{
+       uint32_t overhead_len;
+
+       if (max_pkt_len == 0)
+               return 0;
+
+       if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+               return -1;
+
+       overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+                       dev_info->max_mtu);
+       conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+       if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+               conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       return 0;
+}
+
 int
 main(int argc, char **argv)
 {
@@ -807,6 +823,13 @@ main(int argc, char **argv)
                       nb_rx_queue, n_tx_queue);
 
                rte_eth_dev_info_get(portid, &dev_info);
+
+               ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE,
+                               "Invalid max packet length: %u (port %u)\n",
+                               max_pkt_len, portid);
+
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
index be0d4f16168520e41b1cb749aabb5a2ee0d26312..9402644d6fc5e3b3685e28644fe60ca1597fbb30 100644 (file)
@@ -250,7 +250,6 @@ uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
@@ -265,6 +264,8 @@ static struct rte_eth_conf port_conf = {
        }
 };
 
+static uint32_t max_pkt_len;
+
 static struct rte_mempool * pktmbuf_pool[NB_SOCKETS];
 
 
@@ -1600,16 +1601,15 @@ print_usage(const char *prgname)
                "  [--config (port,queue,lcore)[,(port,queue,lcore]]"
                "  [--high-perf-cores CORELIST"
                "  [--perf-config (port,queue,hi_perf,lcore_index)[,(port,queue,hi_perf,lcore_index]]"
-               "  [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+               "  [--max-pkt-len PKTLEN]\n"
                "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
-               "  -P : enable promiscuous mode\n"
+               "  -P: enable promiscuous mode\n"
                "  --config (port,queue,lcore): rx queues configuration\n"
                "  --high-perf-cores CORELIST: list of high performance cores\n"
                "  --perf-config: similar as config, cores specified as indices"
                " for bins containing high or regular performance cores\n"
                "  --no-numa: optional, disable numa awareness\n"
-               "  --enable-jumbo: enable jumbo frame"
-               " which max packet len is PKTLEN in decimal (64-9600)\n"
+               "  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
                "  --parse-ptype: parse packet type by software\n"
                "  --legacy: use legacy interrupt-based scaling\n"
                "  --empty-poll: enable empty poll detection"
@@ -1794,6 +1794,7 @@ parse_ep_config(const char *q_arg)
 #define CMD_LINE_OPT_INTERRUPT_ONLY "interrupt-only"
 #define CMD_LINE_OPT_TELEMETRY "telemetry"
 #define CMD_LINE_OPT_PMD_MGMT "pmd-mgmt"
+#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
 
 /* Parse the argument given in the command line of the application */
 static int
@@ -1809,7 +1810,7 @@ parse_args(int argc, char **argv)
                {"perf-config", 1, 0, 0},
                {"high-perf-cores", 1, 0, 0},
                {"no-numa", 0, 0, 0},
-               {"enable-jumbo", 0, 0, 0},
+               {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, 0},
                {CMD_LINE_OPT_EMPTY_POLL, 1, 0, 0},
                {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, 0},
                {CMD_LINE_OPT_LEGACY, 0, 0, 0},
@@ -1953,36 +1954,10 @@ parse_args(int argc, char **argv)
                        }
 
                        if (!strncmp(lgopts[option_index].name,
-                                       "enable-jumbo", 12)) {
-                               struct option lenopts =
-                                       {"max-pkt-len", required_argument, \
-                                                                       0, 0};
-
-                               printf("jumbo frame is enabled \n");
-                               port_conf.rxmode.offloads |=
-                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
-                               port_conf.txmode.offloads |=
-                                               DEV_TX_OFFLOAD_MULTI_SEGS;
-
-                               /**
-                                * if no max-pkt-len set, use the default value
-                                * RTE_ETHER_MAX_LEN
-                                */
-                               if (0 == getopt_long(argc, argvopt, "",
-                                               &lenopts, &option_index)) {
-                                       ret = parse_max_pkt_len(optarg);
-                                       if ((ret < 64) ||
-                                               (ret > MAX_JUMBO_PKT_LEN)){
-                                               printf("invalid packet "
-                                                               "length\n");
-                                               print_usage(prgname);
-                                               return -1;
-                                       }
-                                       port_conf.rxmode.max_rx_pkt_len = ret;
-                               }
-                               printf("set jumbo frame "
-                                       "max packet length to %u\n",
-                               (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+                                       CMD_LINE_OPT_MAX_PKT_LEN,
+                                       sizeof(CMD_LINE_OPT_MAX_PKT_LEN))) {
+                               printf("Custom frame size is configured\n");
+                               max_pkt_len = parse_max_pkt_len(optarg);
                        }
 
                        if (!strncmp(lgopts[option_index].name,
@@ -2504,6 +2479,43 @@ mode_to_str(enum appmode mode)
        }
 }
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+               struct rte_eth_dev_info *dev_info)
+{
+       uint32_t overhead_len;
+
+       if (max_pkt_len == 0)
+               return 0;
+
+       if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+               return -1;
+
+       overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+                       dev_info->max_mtu);
+       conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+       if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+               conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       return 0;
+}
+
 /* Power library initialized in the main routine. 8< */
 int
 main(int argc, char **argv)
@@ -2621,6 +2633,12 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
+               ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE,
+                               "Invalid max packet length: %u (port %u)\n",
+                               max_pkt_len, portid);
+
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
index 518fefe90b900d49dc3654fbbb04a99fe9f2d3dd..7701302c75628a633bd1a1f07c7f67215280fb9a 100644 (file)
@@ -120,7 +120,6 @@ static uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
@@ -135,6 +134,8 @@ static struct rte_eth_conf port_conf = {
        },
 };
 
+static uint32_t max_pkt_len;
+
 static struct rte_mempool *pktmbuf_pool[RTE_MAX_ETHPORTS][NB_SOCKETS];
 static uint8_t lkp_per_socket[NB_SOCKETS];
 
@@ -325,7 +326,7 @@ print_usage(const char *prgname)
                " [--lookup]"
                " --config (port,queue,lcore)[,(port,queue,lcore)]"
                " [--eth-dest=X,MM:MM:MM:MM:MM:MM]"
-               " [--enable-jumbo [--max-pkt-len PKTLEN]]"
+               " [--max-pkt-len PKTLEN]"
                " [--no-numa]"
                " [--hash-entry-num]"
                " [--ipv6]"
@@ -343,9 +344,7 @@ print_usage(const char *prgname)
                "            Accepted: em (Exact Match), lpm (Longest Prefix Match), fib (Forwarding Information Base)\n"
                "  --config (port,queue,lcore): Rx queue configuration\n"
                "  --eth-dest=X,MM:MM:MM:MM:MM:MM: Ethernet destination for port X\n"
-               "  --enable-jumbo: Enable jumbo frames\n"
-               "  --max-pkt-len: Under the premise of enabling jumbo,\n"
-               "                 maximum packet length in decimal (64-9600)\n"
+               "  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
                "  --no-numa: Disable numa awareness\n"
                "  --hash-entry-num: Specify the hash entry number in hexadecimal to be setup\n"
                "  --ipv6: Set if running ipv6 packets\n"
@@ -565,7 +564,7 @@ static const char short_options[] =
 #define CMD_LINE_OPT_ETH_DEST "eth-dest"
 #define CMD_LINE_OPT_NO_NUMA "no-numa"
 #define CMD_LINE_OPT_IPV6 "ipv6"
-#define CMD_LINE_OPT_ENABLE_JUMBO "enable-jumbo"
+#define CMD_LINE_OPT_MAX_PKT_LEN "max-pkt-len"
 #define CMD_LINE_OPT_HASH_ENTRY_NUM "hash-entry-num"
 #define CMD_LINE_OPT_PARSE_PTYPE "parse-ptype"
 #define CMD_LINE_OPT_PER_PORT_POOL "per-port-pool"
@@ -583,7 +582,7 @@ enum {
        CMD_LINE_OPT_ETH_DEST_NUM,
        CMD_LINE_OPT_NO_NUMA_NUM,
        CMD_LINE_OPT_IPV6_NUM,
-       CMD_LINE_OPT_ENABLE_JUMBO_NUM,
+       CMD_LINE_OPT_MAX_PKT_LEN_NUM,
        CMD_LINE_OPT_HASH_ENTRY_NUM_NUM,
        CMD_LINE_OPT_PARSE_PTYPE_NUM,
        CMD_LINE_OPT_PARSE_PER_PORT_POOL,
@@ -598,7 +597,7 @@ static const struct option lgopts[] = {
        {CMD_LINE_OPT_ETH_DEST, 1, 0, CMD_LINE_OPT_ETH_DEST_NUM},
        {CMD_LINE_OPT_NO_NUMA, 0, 0, CMD_LINE_OPT_NO_NUMA_NUM},
        {CMD_LINE_OPT_IPV6, 0, 0, CMD_LINE_OPT_IPV6_NUM},
-       {CMD_LINE_OPT_ENABLE_JUMBO, 0, 0, CMD_LINE_OPT_ENABLE_JUMBO_NUM},
+       {CMD_LINE_OPT_MAX_PKT_LEN, 1, 0, CMD_LINE_OPT_MAX_PKT_LEN_NUM},
        {CMD_LINE_OPT_HASH_ENTRY_NUM, 1, 0, CMD_LINE_OPT_HASH_ENTRY_NUM_NUM},
        {CMD_LINE_OPT_PARSE_PTYPE, 0, 0, CMD_LINE_OPT_PARSE_PTYPE_NUM},
        {CMD_LINE_OPT_PER_PORT_POOL, 0, 0, CMD_LINE_OPT_PARSE_PER_PORT_POOL},
@@ -697,31 +696,9 @@ parse_args(int argc, char **argv)
                        ipv6 = 1;
                        break;
 
-               case CMD_LINE_OPT_ENABLE_JUMBO_NUM: {
-                       const struct option lenopts = {
-                               "max-pkt-len", required_argument, 0, 0
-                       };
-
-                       port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-                       port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-
-                       /*
-                        * if no max-pkt-len set, use the default
-                        * value RTE_ETHER_MAX_LEN.
-                        */
-                       if (getopt_long(argc, argvopt, "",
-                                       &lenopts, &option_index) == 0) {
-                               ret = parse_max_pkt_len(optarg);
-                               if (ret < 64 || ret > MAX_JUMBO_PKT_LEN) {
-                                       fprintf(stderr,
-                                               "invalid maximum packet length\n");
-                                       print_usage(prgname);
-                                       return -1;
-                               }
-                               port_conf.rxmode.max_rx_pkt_len = ret;
-                       }
+               case CMD_LINE_OPT_MAX_PKT_LEN_NUM:
+                       max_pkt_len = parse_max_pkt_len(optarg);
                        break;
-               }
 
                case CMD_LINE_OPT_HASH_ENTRY_NUM_NUM:
                        ret = parse_hash_entry_number(optarg);
@@ -980,6 +957,43 @@ prepare_ptype_parser(uint16_t portid, uint16_t queueid)
        return 0;
 }
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+               struct rte_eth_dev_info *dev_info)
+{
+       uint32_t overhead_len;
+
+       if (max_pkt_len == 0)
+               return 0;
+
+       if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+               return -1;
+
+       overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+                       dev_info->max_mtu);
+       conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+       if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+               conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       return 0;
+}
+
 static void
 l3fwd_poll_resource_setup(void)
 {
@@ -1034,6 +1048,12 @@ l3fwd_poll_resource_setup(void)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
+               ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE,
+                               "Invalid max packet length: %u (port %u)\n",
+                               max_pkt_len, portid);
+
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
index 50ecc4e820f6eec0e81fc05125ca9e94c158325e..69a12205bcc04742fddb862ac234cb73ee44dab2 100644 (file)
@@ -307,7 +307,6 @@ static uint16_t nb_tx_thread_params = RTE_DIM(tx_thread_params_array_default);
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
@@ -322,6 +321,8 @@ static struct rte_eth_conf port_conf = {
        },
 };
 
+static uint32_t max_pkt_len;
+
 static struct rte_mempool *pktmbuf_pool[NB_SOCKETS];
 
 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
@@ -2639,7 +2640,7 @@ print_usage(const char *prgname)
        printf("%s [EAL options] -- -p PORTMASK -P"
                "  [--rx (port,queue,lcore,thread)[,(port,queue,lcore,thread]]"
                "  [--tx (lcore,thread)[,(lcore,thread]]"
-               "  [--enable-jumbo [--max-pkt-len PKTLEN]]\n"
+               "  [--max-pkt-len PKTLEN]"
                "  [--parse-ptype]\n\n"
                "  -p PORTMASK: hexadecimal bitmask of ports to configure\n"
                "  -P : enable promiscuous mode\n"
@@ -2649,8 +2650,7 @@ print_usage(const char *prgname)
                "  --eth-dest=X,MM:MM:MM:MM:MM:MM: optional, ethernet destination for port X\n"
                "  --no-numa: optional, disable numa awareness\n"
                "  --ipv6: optional, specify it if running ipv6 packets\n"
-               "  --enable-jumbo: enable jumbo frame"
-               " which max packet len is PKTLEN in decimal (64-9600)\n"
+               "  --max-pkt-len PKTLEN: maximum packet length in decimal (64-9600)\n"
                "  --hash-entry-num: specify the hash entry number in hexadecimal to be setup\n"
                "  --no-lthreads: turn off lthread model\n"
                "  --parse-ptype: set to use software to analyze packet type\n\n",
@@ -2873,8 +2873,8 @@ enum {
        OPT_NO_NUMA_NUM,
 #define OPT_IPV6            "ipv6"
        OPT_IPV6_NUM,
-#define OPT_ENABLE_JUMBO    "enable-jumbo"
-       OPT_ENABLE_JUMBO_NUM,
+#define OPT_MAX_PKT_LEN "max-pkt-len"
+       OPT_MAX_PKT_LEN_NUM,
 #define OPT_HASH_ENTRY_NUM  "hash-entry-num"
        OPT_HASH_ENTRY_NUM_NUM,
 #define OPT_NO_LTHREADS     "no-lthreads"
@@ -2898,7 +2898,7 @@ parse_args(int argc, char **argv)
                {OPT_ETH_DEST,       1, NULL, OPT_ETH_DEST_NUM       },
                {OPT_NO_NUMA,        0, NULL, OPT_NO_NUMA_NUM        },
                {OPT_IPV6,           0, NULL, OPT_IPV6_NUM           },
-               {OPT_ENABLE_JUMBO,   0, NULL, OPT_ENABLE_JUMBO_NUM   },
+               {OPT_MAX_PKT_LEN,    1, NULL, OPT_MAX_PKT_LEN_NUM    },
                {OPT_HASH_ENTRY_NUM, 1, NULL, OPT_HASH_ENTRY_NUM_NUM },
                {OPT_NO_LTHREADS,    0, NULL, OPT_NO_LTHREADS_NUM    },
                {OPT_PARSE_PTYPE,    0, NULL, OPT_PARSE_PTYPE_NUM    },
@@ -2977,35 +2977,10 @@ parse_args(int argc, char **argv)
                        parse_ptype_on = 1;
                        break;
 
-               case OPT_ENABLE_JUMBO_NUM:
-               {
-                       struct option lenopts = {"max-pkt-len",
-                                       required_argument, 0, 0};
-
-                       printf("jumbo frame is enabled - disabling simple TX path\n");
-                       port_conf.rxmode.offloads |=
-                                       DEV_RX_OFFLOAD_JUMBO_FRAME;
-                       port_conf.txmode.offloads |=
-                                       DEV_TX_OFFLOAD_MULTI_SEGS;
-
-                       /* if no max-pkt-len set, use the default value
-                        * RTE_ETHER_MAX_LEN
-                        */
-                       if (getopt_long(argc, argvopt, "", &lenopts,
-                                       &option_index) == 0) {
-
-                               ret = parse_max_pkt_len(optarg);
-                               if ((ret < 64) || (ret > MAX_JUMBO_PKT_LEN)) {
-                                       printf("invalid packet length\n");
-                                       print_usage(prgname);
-                                       return -1;
-                               }
-                               port_conf.rxmode.max_rx_pkt_len = ret;
-                       }
-                       printf("set jumbo frame max packet length to %u\n",
-                               (unsigned int)port_conf.rxmode.max_rx_pkt_len);
+               case OPT_MAX_PKT_LEN_NUM:
+                       max_pkt_len = parse_max_pkt_len(optarg);
                        break;
-               }
+
 #if (APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH)
                case OPT_HASH_ENTRY_NUM_NUM:
                        ret = parse_hash_entry_number(optarg);
@@ -3485,6 +3460,43 @@ check_all_ports_link_status(uint32_t port_mask)
        }
 }
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
+static int
+config_port_max_pkt_len(struct rte_eth_conf *conf,
+               struct rte_eth_dev_info *dev_info)
+{
+       uint32_t overhead_len;
+
+       if (max_pkt_len == 0)
+               return 0;
+
+       if (max_pkt_len < RTE_ETHER_MIN_LEN || max_pkt_len > MAX_JUMBO_PKT_LEN)
+               return -1;
+
+       overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+                       dev_info->max_mtu);
+       conf->rxmode.mtu = max_pkt_len - overhead_len;
+
+       if (conf->rxmode.mtu > RTE_ETHER_MTU) {
+               conf->txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
+               conf->rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       }
+
+       return 0;
+}
+
 int
 main(int argc, char **argv)
 {
@@ -3573,6 +3585,12 @@ main(int argc, char **argv)
                                "Error during getting device (port %u) info: %s\n",
                                portid, strerror(-ret));
 
+               ret = config_port_max_pkt_len(&local_port_conf, &dev_info);
+               if (ret != 0)
+                       rte_exit(EXIT_FAILURE,
+                               "Invalid max packet length: %u (port %u)\n",
+                               max_pkt_len, portid);
+
                if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
                        local_port_conf.txmode.offloads |=
                                DEV_TX_OFFLOAD_MBUF_FAST_FREE;
index f0b6e271a5f3d829f5a43c25676b82981244ae8d..3dd33407ea41e7b4c5b42322a74a6900e4da7f66 100755 (executable)
@@ -11,7 +11,7 @@ case "$1" in
                echo "1.1 1 L-core per pcore (N=2)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
-                               --enable-jumbo --max-pkt-len 1500  \
+                               --max-pkt-len 1500  \
                                --rx="(0,0,0,0)(1,0,0,0)"          \
                                --tx="(1,0)"                       \
                                --stat-lcore 2                     \
@@ -23,7 +23,7 @@ case "$1" in
                echo "1.2 1 L-core per pcore (N=4)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3 \
-                               --enable-jumbo --max-pkt-len 1500  \
+                               --max-pkt-len 1500  \
                                --rx="(0,0,0,0)(1,0,1,1)"          \
                                --tx="(2,0)(3,1)"                  \
                                --stat-lcore 4                     \
@@ -34,7 +34,7 @@ case "$1" in
                echo "1.3 1 L-core per pcore (N=8)"
 
                ./build/l3fwd-thread -c 1ff -n 2 -- -P -p 3                          \
-                               --enable-jumbo --max-pkt-len 1500                            \
+                               --max-pkt-len 1500                            \
                                --rx="(0,0,0,0)(0,1,1,1)(1,0,2,2)(1,1,3,3)"                  \
                                --tx="(4,0)(5,1)(6,2)(7,3)"                                  \
                                --stat-lcore 8                                               \
@@ -45,7 +45,7 @@ case "$1" in
                echo "1.3 1 L-core per pcore (N=16)"
 
                ./build/l3fwd-thread -c 3ffff -n 2 -- -P -p 3                          \
-                               --enable-jumbo --max-pkt-len 1500                              \
+                               --max-pkt-len 1500                              \
                                --rx="(0,0,0,0)(0,1,1,1)(0,2,2,2)(0,3,3,3)(1,0,4,4)(1,1,5,5)(1,2,6,6)(1,3,7,7)" \
                                --tx="(8,0)(9,1)(10,2)(11,3)(12,4)(13,5)(14,6)(15,7)"          \
                                --stat-lcore 16                                                \
@@ -61,7 +61,7 @@ case "$1" in
                echo "2.1 N L-core per pcore (N=2)"
 
                ./build/l3fwd-thread -c ff -n 2 --lcores="2,(0-1)@0" -- -P -p 3 \
-                               --enable-jumbo --max-pkt-len 1500                       \
+                               --max-pkt-len 1500                       \
                                --rx="(0,0,0,0)(1,0,0,0)"                               \
                                --tx="(1,0)"                                            \
                                --stat-lcore 2                                          \
@@ -73,7 +73,7 @@ case "$1" in
                echo "2.2 N L-core per pcore (N=4)"
 
                ./build/l3fwd-thread -c ff -n 2 --lcores="(0-3)@0,4" -- -P -p 3 \
-                               --enable-jumbo --max-pkt-len 1500  \
+                               --max-pkt-len 1500  \
                                --rx="(0,0,0,0)(1,0,1,1)"          \
                                --tx="(2,0)(3,1)"                  \
                                --stat-lcore 4                     \
@@ -84,7 +84,7 @@ case "$1" in
                echo "2.3 N L-core per pcore (N=8)"
 
                ./build/l3fwd-thread -c 3ffff -n 2 --lcores="(0-7)@0,8" -- -P -p 3     \
-                               --enable-jumbo --max-pkt-len 1500                              \
+                               --max-pkt-len 1500                              \
                                --rx="(0,0,0,0)(0,1,1,1)(1,0,2,2)(1,1,3,3)"                    \
                                --tx="(4,0)(5,1)(6,2)(7,3)"                                    \
                                --stat-lcore 8                                                 \
@@ -95,7 +95,7 @@ case "$1" in
                echo "2.3 N L-core per pcore (N=16)"
 
                ./build/l3fwd-thread -c 3ffff -n 2 --lcores="(0-15)@0,16" -- -P -p 3   \
-                               --enable-jumbo --max-pkt-len 1500                              \
+                               --max-pkt-len 1500                              \
                                --rx="(0,0,0,0)(0,1,1,1)(0,2,2,2)(0,3,3,3)(1,0,4,4)(1,1,5,5)(1,2,6,6)(1,3,7,7)" \
                                --tx="(8,0)(9,1)(10,2)(11,3)(12,4)(13,5)(14,6)(15,7)"          \
                                --stat-lcore 16                                                \
@@ -111,7 +111,7 @@ case "$1" in
                echo "3.1 N L-threads per pcore (N=2)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3  \
-                               --enable-jumbo --max-pkt-len 1500   \
+                               --max-pkt-len 1500   \
                                --rx="(0,0,0,0)(1,0,0,0)"           \
                                --tx="(0,0)"                        \
                                --stat-lcore 1
@@ -121,7 +121,7 @@ case "$1" in
                echo "3.2 N L-threads per pcore (N=4)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3  \
-                               --enable-jumbo --max-pkt-len 1500   \
+                               --max-pkt-len 1500   \
                                --rx="(0,0,0,0)(1,0,0,1)"           \
                                --tx="(0,0)(0,1)"                   \
                                --stat-lcore 1
@@ -131,7 +131,7 @@ case "$1" in
                echo "3.2 N L-threads per pcore (N=8)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3                             \
-                               --enable-jumbo --max-pkt-len 1500                              \
+                               --max-pkt-len 1500                              \
                                --rx="(0,0,0,0)(0,1,0,1)(1,0,0,2)(1,1,0,3)"                    \
                                --tx="(0,0)(0,1)(0,2)(0,3)"                                    \
                                --stat-lcore 1
@@ -141,7 +141,7 @@ case "$1" in
                echo "3.2 N L-threads per pcore (N=16)"
 
                ./build/l3fwd-thread -c ff -n 2 -- -P -p 3                             \
-                               --enable-jumbo --max-pkt-len 1500                              \
+                               --max-pkt-len 1500                              \
                                --rx="(0,0,0,0)(0,1,0,1)(0,2,0,2)(0,0,0,3)(1,0,0,4)(1,1,0,5)(1,2,0,6)(1,3,0,7)" \
                                --tx="(0,0)(0,1)(0,2)(0,3)(0,4)(0,5)(0,6)(0,7)"                \
                                --stat-lcore 1
index 467cda5a6daccd1cc7720cf1d7f346d146307861..4f20dfc4be0631d1a91e0c76c513ed6f7b64b8b9 100644 (file)
@@ -134,7 +134,7 @@ static struct rte_eth_conf port_conf_default = {
        .link_speeds = 0,
        .rxmode = {
                .mq_mode = ETH_MQ_RX_NONE,
-               .max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
+               .mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
                .split_hdr_size = 0, /* Header split buffer size */
        },
        .rx_adv_conf = {
index d94eca0353d76cc1b0b3ff191d93e0861260b6c5..229a277032cbb5e1f583d0fb698d12e9a7c35080 100644 (file)
@@ -47,12 +47,6 @@ uint32_t ptp_enabled_port_mask;
 uint8_t ptp_enabled_port_nb;
 static uint8_t ptp_enabled_ports[RTE_MAX_ETHPORTS];
 
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
-       },
-};
-
 static const struct rte_ether_addr ether_multicast = {
        .addr_bytes = {0x01, 0x1b, 0x19, 0x0, 0x0, 0x0}
 };
@@ -178,7 +172,7 @@ static inline int
 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
        struct rte_eth_dev_info dev_info;
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1;
        const uint16_t tx_rings = 1;
        int retval;
@@ -189,6 +183,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
index 7ffccc8369dcae173711a53a79c239450c1e72c0..c32d2e12e633fec9a1a3ccf23583e53a86f12406 100644 (file)
@@ -52,7 +52,6 @@ static struct rte_mempool *pool = NULL;
 static struct rte_eth_conf port_conf = {
        .rxmode = {
                .mq_mode        = ETH_MQ_RX_RSS,
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
                .offloads = DEV_RX_OFFLOAD_CHECKSUM,
        },
index 1abe003fc6ae66b2bc73fd977699fc57065da991..1367569c65dba7394990193a89df24d9ba36c596 100644 (file)
@@ -57,7 +57,6 @@ struct flow_conf qos_conf[MAX_DATA_STREAMS];
 
 static struct rte_eth_conf port_conf = {
        .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
                .split_hdr_size = 0,
        },
        .txmode = {
index ab6fa7d56c5da378d526751ab5a315e013e3fe2d..6845c396b8d92c14fb75aaae858a78811f22ca3f 100644 (file)
@@ -40,12 +40,6 @@ tsc_field(struct rte_mbuf *mbuf)
 static const char usage[] =
        "%s EAL_ARGS -- [-t]\n";
 
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
-       },
-};
-
 static struct {
        uint64_t total_cycles;
        uint64_t total_queue_cycles;
@@ -124,7 +118,7 @@ calc_latency(uint16_t port, uint16_t qidx __rte_unused,
 static inline int
 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1, tx_rings = 1;
        uint16_t nb_rxd = RX_RING_SIZE;
        uint16_t nb_txd = TX_RING_SIZE;
@@ -137,6 +131,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
index ae9bbee8d8204a77d7bf3181eba65fa4058c0466..fd7207aee7584001f86fea226289d4083c67d078 100644 (file)
 #define MBUF_CACHE_SIZE 250
 #define BURST_SIZE 32
 
-/* Configuration of ethernet ports. 8<  */
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
-       },
-};
-/* >8 End of configuration of ethernet ports. */
-
 /* basicfwd.c: Basic DPDK skeleton forwarding example. */
 
 /*
@@ -36,7 +28,7 @@ static const struct rte_eth_conf port_conf_default = {
 static inline int
 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1, tx_rings = 1;
        uint16_t nb_rxd = RX_RING_SIZE;
        uint16_t nb_txd = TX_RING_SIZE;
@@ -48,6 +40,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
index b24fd82a6e71a9de66530765418a063179a945ef..427b882831bf0a686b7703fec040215c60b17ffd 100644 (file)
@@ -44,6 +44,7 @@
 #define BURST_RX_RETRIES 4             /* Number of retries on RX. */
 
 #define JUMBO_FRAME_MAX_SIZE    0x2600
+#define MAX_MTU (JUMBO_FRAME_MAX_SIZE - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN))
 
 /* State of virtio device. */
 #define DEVICE_MAC_LEARNING 0
@@ -633,8 +634,7 @@ us_vhost_parse_args(int argc, char **argv)
                        if (ret) {
                                vmdq_conf_default.rxmode.offloads |=
                                        DEV_RX_OFFLOAD_JUMBO_FRAME;
-                               vmdq_conf_default.rxmode.max_rx_pkt_len
-                                       = JUMBO_FRAME_MAX_SIZE;
+                               vmdq_conf_default.rxmode.mtu = MAX_MTU;
                        }
                        break;
 
index e59fb7d3478b907b18ccc6e2be5891e3b474181d..e19d79a40802ddba16bb9fd96ef9c20331bbd706 100644 (file)
 static uint32_t enabled_port_mask;
 static volatile bool force_quit;
 
-/****************/
-static const struct rte_eth_conf port_conf_default = {
-       .rxmode = {
-               .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
-       },
-};
-
 static inline int
 port_init(uint16_t port, struct rte_mempool *mbuf_pool)
 {
-       struct rte_eth_conf port_conf = port_conf_default;
+       struct rte_eth_conf port_conf;
        const uint16_t rx_rings = 1, tx_rings = 1;
        int retval;
        uint16_t q;
@@ -71,6 +64,8 @@ port_init(uint16_t port, struct rte_mempool *mbuf_pool)
        if (!rte_eth_dev_is_valid_port(port))
                return -1;
 
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+
        retval = rte_eth_dev_info_get(port, &dev_info);
        if (retval != 0) {
                printf("Error during getting device (port %u) info: %s\n",
index 66f905c822e23ac8e5bf779298c6e1d2f4f7e0d4..8d1ccf6f732c04fc6b89a45521466111ed5714cf 100644 (file)
@@ -1315,6 +1315,19 @@ eth_dev_validate_offloads(uint16_t port_id, uint64_t req_offloads,
        return ret;
 }
 
+static uint32_t
+eth_dev_get_overhead_len(uint32_t max_rx_pktlen, uint16_t max_mtu)
+{
+       uint32_t overhead_len;
+
+       if (max_mtu != UINT16_MAX && max_rx_pktlen > max_mtu)
+               overhead_len = max_rx_pktlen - max_mtu;
+       else
+               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+       return overhead_len;
+}
+
 int
 rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                      const struct rte_eth_conf *dev_conf)
@@ -1322,7 +1335,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
        struct rte_eth_dev *dev;
        struct rte_eth_dev_info dev_info;
        struct rte_eth_conf orig_conf;
-       uint16_t overhead_len;
+       uint32_t max_rx_pktlen;
+       uint32_t overhead_len;
        int diag;
        int ret;
        uint16_t old_mtu;
@@ -1372,11 +1386,8 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
                goto rollback;
 
        /* Get the real Ethernet overhead length */
-       if (dev_info.max_mtu != UINT16_MAX &&
-           dev_info.max_rx_pktlen > dev_info.max_mtu)
-               overhead_len = dev_info.max_rx_pktlen - dev_info.max_mtu;
-       else
-               overhead_len = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+       overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+                       dev_info.max_mtu);
 
        /* If number of queues specified by application for both Rx and Tx is
         * zero, use driver preferred values. This cannot be done individually
@@ -1445,49 +1456,45 @@ rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
        }
 
        /*
-        * If jumbo frames are enabled, check that the maximum RX packet
-        * length is supported by the configured device.
+        * Check that the maximum RX packet length is supported by the
+        * configured device.
         */
-       if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
-                       RTE_ETHDEV_LOG(ERR,
-                               "Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
-                               port_id, dev_conf->rxmode.max_rx_pkt_len,
-                               dev_info.max_rx_pktlen);
-                       ret = -EINVAL;
-                       goto rollback;
-               } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
-                       RTE_ETHDEV_LOG(ERR,
-                               "Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
-                               port_id, dev_conf->rxmode.max_rx_pkt_len,
-                               (unsigned int)RTE_ETHER_MIN_LEN);
-                       ret = -EINVAL;
-                       goto rollback;
-               }
+       if (dev_conf->rxmode.mtu == 0)
+               dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
+       max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
+       if (max_rx_pktlen > dev_info.max_rx_pktlen) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Ethdev port_id=%u max_rx_pktlen %u > max valid value %u\n",
+                       port_id, max_rx_pktlen, dev_info.max_rx_pktlen);
+               ret = -EINVAL;
+               goto rollback;
+       } else if (max_rx_pktlen < RTE_ETHER_MIN_LEN) {
+               RTE_ETHDEV_LOG(ERR,
+                       "Ethdev port_id=%u max_rx_pktlen %u < min valid value %u\n",
+                       port_id, max_rx_pktlen, RTE_ETHER_MIN_LEN);
+               ret = -EINVAL;
+               goto rollback;
+       }
 
-               /* Scale the MTU size to adapt max_rx_pkt_len */
-               dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
-                               overhead_len;
-       } else {
-               uint16_t pktlen = dev_conf->rxmode.max_rx_pkt_len;
-               if (pktlen < RTE_ETHER_MIN_MTU + overhead_len ||
-                   pktlen > RTE_ETHER_MTU + overhead_len)
+       if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+               if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
+                               dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
                        /* Use default value */
-                       dev->data->dev_conf.rxmode.max_rx_pkt_len =
-                                               RTE_ETHER_MTU + overhead_len;
+                       dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
        }
 
+       dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
+
        /*
         * If LRO is enabled, check that the maximum aggregated packet
         * size is supported by the configured device.
         */
        if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                if (dev_conf->rxmode.max_lro_pkt_size == 0)
-                       dev->data->dev_conf.rxmode.max_lro_pkt_size =
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len;
+                       dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
                ret = eth_dev_check_lro_pkt_size(port_id,
                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                               max_rx_pktlen,
                                dev_info.max_lro_pkt_size);
                if (ret != 0)
                        goto rollback;
@@ -2146,13 +2153,20 @@ rte_eth_rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
         * If LRO is enabled, check that the maximum aggregated packet
         * size is supported by the configured device.
         */
+       /* Get the real Ethernet overhead length */
        if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+               uint32_t overhead_len;
+               uint32_t max_rx_pktlen;
+               int ret;
+
+               overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+                               dev_info.max_mtu);
+               max_rx_pktlen = dev->data->mtu + overhead_len;
                if (dev->data->dev_conf.rxmode.max_lro_pkt_size == 0)
-                       dev->data->dev_conf.rxmode.max_lro_pkt_size =
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len;
-               int ret = eth_dev_check_lro_pkt_size(port_id,
+                       dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
+               ret = eth_dev_check_lro_pkt_size(port_id,
                                dev->data->dev_conf.rxmode.max_lro_pkt_size,
-                               dev->data->dev_conf.rxmode.max_rx_pkt_len,
+                               max_rx_pktlen,
                                dev_info.max_lro_pkt_size);
                if (ret != 0)
                        return ret;
index b7db29405e0342f304631a268e01cc67c613b5b5..e82019218a9106a4daa220e74cd0e132ba2f9506 100644 (file)
@@ -416,7 +416,7 @@ enum rte_eth_tx_mq_mode {
 struct rte_eth_rxmode {
        /** The multi-queue packet distribution mode to be used, e.g. RSS. */
        enum rte_eth_rx_mq_mode mq_mode;
-       uint32_t max_rx_pkt_len;  /**< Only used if JUMBO_FRAME enabled. */
+       uint32_t mtu;  /**< Requested MTU. */
        /** Maximum allowed size of LRO aggregated packet. */
        uint32_t max_lro_pkt_size;
        uint16_t split_hdr_size;  /**< hdr buf size (header_split enabled).*/
index 0036bda7465c0e7ef29eb2003aa05dd9c8988792..1491c815c312141c87773c2b64b1d9aa9bf7f371 100644 (file)
@@ -28,7 +28,7 @@ RTE_TRACE_POINT(
        rte_trace_point_emit_u16(nb_tx_q);
        rte_trace_point_emit_u32(dev_conf->link_speeds);
        rte_trace_point_emit_u32(dev_conf->rxmode.mq_mode);
-       rte_trace_point_emit_u32(dev_conf->rxmode.max_rx_pkt_len);
+       rte_trace_point_emit_u32(dev_conf->rxmode.mtu);
        rte_trace_point_emit_u64(dev_conf->rxmode.offloads);
        rte_trace_point_emit_u32(dev_conf->txmode.mq_mode);
        rte_trace_point_emit_u64(dev_conf->txmode.offloads);