/**< Timings for send scheduling in TXONLY mode, time between packets. */
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* current configuration is in DCB or not; 0 means it is not in DCB mode */
* Ethernet device configuration.
*/
struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
- /**< Default maximum frame length. */
+ /* Default maximum frame length.
+ * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
+ * in init_config().
+ */
+ .max_rx_pkt_len = 0,
};
struct rte_eth_txmode tx_mode = {
*/
enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
+/*
+ * Used to set forced link speed
+ */
+uint32_t eth_link_speed;
+
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
static void check_all_ports_link_status(uint32_t port_mask);
rte_exit(EXIT_FAILURE,
"rte_eth_dev_info_get() failed\n");
+ ret = update_jumbo_frame_offload(pid);
+ if (ret != 0)
+ printf("Updating jumbo frame offload failed for port %u\n",
+ pid);
+
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
port->tx_conf[k].offloads =
port->dev_conf.txmode.offloads;
+ if (eth_link_speed)
+ port->dev_conf.link_speeds = eth_link_speed;
+
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
" Rx- bad outer L4 checksum: %-14"PRIu64"\n",
fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
fs->rx_bad_outer_l4_csum);
+ printf(" RX- bad outer IP checksum: %-14"PRIu64"\n",
+ fs->rx_bad_outer_ip_csum);
} else {
printf("\n");
}
uint64_t rx_bad_ip_csum;
uint64_t rx_bad_l4_csum;
uint64_t rx_bad_outer_l4_csum;
+ uint64_t rx_bad_outer_ip_csum;
} ports_stats[RTE_MAX_ETHPORTS];
uint64_t total_rx_dropped = 0;
uint64_t total_tx_dropped = 0;
ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
fs->rx_bad_outer_l4_csum;
+ ports_stats[fs->rx_port].rx_bad_outer_ip_csum +=
+ fs->rx_bad_outer_ip_csum;
if (record_core_cycles)
fwd_cycles += fs->core_cycles;
"RX-total: %-"PRIu64"\n", stats.ipackets, stats.imissed,
stats.ipackets + stats.imissed);
- if (cur_fwd_eng == &csum_fwd_engine)
+ if (cur_fwd_eng == &csum_fwd_engine) {
printf(" Bad-ipcsum: %-14"PRIu64
" Bad-l4csum: %-14"PRIu64
"Bad-outer-l4csum: %-14"PRIu64"\n",
ports_stats[pt_id].rx_bad_ip_csum,
ports_stats[pt_id].rx_bad_l4_csum,
ports_stats[pt_id].rx_bad_outer_l4_csum);
+ printf(" Bad-outer-ipcsum: %-14"PRIu64"\n",
+ ports_stats[pt_id].rx_bad_outer_ip_csum);
+ }
if (stats.ierrors + stats.rx_nombuf > 0) {
printf(" RX-error: %-"PRIu64"\n", stats.ierrors);
printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf);
fs->rx_bad_ip_csum = 0;
fs->rx_bad_l4_csum = 0;
fs->rx_bad_outer_l4_csum = 0;
+ fs->rx_bad_outer_ip_csum = 0;
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
}
}
+ if (port->flow_list)
+ port_flow_flush(pi);
+
if (rte_eth_dev_stop(pi) != 0)
RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
pi);
}
}
+/*
+ * Helper function to keep max_rx_pkt_len, the JUMBO_FRAME Rx offload and the
+ * device MTU consistent with each other: the offload is enabled only when the
+ * configured frame length exceeds a standard (RTE_ETHER_MTU-sized) frame, and
+ * the MTU is aligned here when JUMBO_FRAME ends up unset (when it is set, the
+ * ethdev layer performs the MTU conversion instead).
+ *
+ * port->dev_info must be populated before calling this function.
+ *
+ * Return 0 on success, negative on error.
+ */
+int
+update_jumbo_frame_offload(portid_t portid)
+{
+	struct rte_port *port = &ports[portid];
+	uint32_t eth_overhead;
+	uint64_t rx_offloads;
+	int ret;
+	bool on;
+
+	/*
+	 * Derive the PMD-specific L2 overhead (everything beyond the MTU in a
+	 * max-sized frame) from dev_info when both limits look valid;
+	 * otherwise fall back to plain Ethernet header + CRC.
+	 */
+	if (port->dev_info.max_mtu != UINT16_MAX &&
+	    port->dev_info.max_rx_pktlen > port->dev_info.max_mtu)
+		eth_overhead = port->dev_info.max_rx_pktlen -
+				port->dev_info.max_mtu;
+	else
+		eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	rx_offloads = port->dev_conf.rxmode.offloads;
+
+	/*
+	 * Default config value is 0, meaning "use the PMD-specific overhead":
+	 * expand it to a standard frame (RTE_ETHER_MTU worth of payload).
+	 */
+	if (port->dev_conf.rxmode.max_rx_pkt_len == 0)
+		port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead;
+
+	if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) {
+		/* Standard-sized frame: JUMBO_FRAME must be off. */
+		rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		on = false;
+	} else {
+		/* Oversized frame requires the JUMBO_FRAME capability. */
+		if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+			printf("Frame size (%u) is not supported by port %u\n",
+				port->dev_conf.rxmode.max_rx_pkt_len,
+				portid);
+			return -1;
+		}
+		rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+		on = true;
+	}
+
+	if (rx_offloads != port->dev_conf.rxmode.offloads) {
+		uint16_t qid;
+
+		port->dev_conf.rxmode.offloads = rx_offloads;
+
+		/* Apply JUMBO_FRAME offload configuration to Rx queue(s) */
+		for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) {
+			if (on)
+				port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+			else
+				port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
+		}
+	}
+
+	/* If JUMBO_FRAME is set MTU conversion done by ethdev layer,
+	 * if unset do it here.
+	 * NOTE(review): a rte_eth_dev_set_mtu() failure is only reported via
+	 * printf, not propagated to the caller — confirm this is intentional.
+	 */
+	if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
+		ret = rte_eth_dev_set_mtu(portid,
+				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead);
+		if (ret)
+			printf("Failed to set MTU to %u for port %u\n",
+				port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead,
+				portid);
+	}
+
+	return 0;
+}
+
void
init_port_config(void)
{