#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
-#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
#include <rte_pmd_ixgbe.h>
#endif
-#ifdef RTE_LIBRTE_PDUMP
+#ifdef RTE_LIB_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_flow.h>
#include <rte_metrics.h>
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
#include <rte_bitrate.h>
#endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
#include <rte_latencystats.h>
#endif
#endif
#define EXTMEM_HEAP_NAME "extmem"
+#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
-/* use master core for command line ? */
+/* use main core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
uint8_t tx_first;
&csum_fwd_engine,
&icmp_echo_engine,
&noisy_vnf_engine,
-#if defined RTE_LIBRTE_PMD_SOFTNIC
- &softnic_fwd_engine,
-#endif
+ &five_tuple_swap_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
#endif
NULL,
};
-struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
uint16_t mempool_flags;
struct fwd_config cur_fwd_config;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
-uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+uint32_t mbuf_data_size_n = 1; /**< Number of specified mbuf sizes. */
+uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
+ DEFAULT_MBUF_DATA_SIZE
+}; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
* specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */
*/
uint8_t f_quit;
+/*
+ * Configuration of packet segments used to scatter received packets
+ * if any of the split features is configured.
+ */
+uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
+uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
+uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
+uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
+
/*
* Configuration of packet segments used by the "txonly" processing engine.
*/
uint8_t txonly_multi_flow;
/**< Whether multiple flows are generated in TXONLY mode. */
+uint32_t tx_pkt_times_inter;
+/**< Timings for send scheduling in TXONLY mode, time between bursts. */
+
+uint32_t tx_pkt_times_intra;
+/**< Timings for send scheduling in TXONLY mode, time between packets. */
+
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/*
* Configurable number of RX/TX queues.
*/
+queueid_t nb_hairpinq; /**< Number of hairpin queues per port. */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */
*/
uint8_t no_link_check = 0; /* check by default */
+/*
+ * Don't automatically start all ports in interactive mode.
+ */
+uint8_t no_device_start = 0;
+
/*
* Enable link status change notification
*/
/* After attach, port setup is called on event or by iterator */
bool setup_on_probe_event = true;
+/* Clear ptypes on port initialization. */
+uint8_t clear_ptypes = true;
+
+/* Hairpin ports configuration mode. */
+uint16_t hairpin_mode;
+
/* Pretty printing of ethdev events */
static const char * const eth_event_desc[] = {
[RTE_ETH_EVENT_UNKNOWN] = "unknown",
[RTE_ETH_EVENT_INTR_RMV] = "device removal",
[RTE_ETH_EVENT_NEW] = "device probed",
[RTE_ETH_EVENT_DESTROY] = "device released",
+ [RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
[RTE_ETH_EVENT_MAX] = NULL,
};
(UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
(UINT32_C(1) << RTE_ETH_EVENT_IPSEC) |
(UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
- (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
+ (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV) |
+ (UINT32_C(1) << RTE_ETH_EVENT_FLOW_AGED);
/*
 * Decide if all memory is locked for performance.
*/
* NIC bypass mode configuration options.
*/
-#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
/*
* Set when latency stats is enabled in the commandline
*/
uint8_t xstats_hide_zero;
+/*
+ * Measurement of CPU cycles is disabled by default
+ */
+uint8_t record_core_cycles;
+
+/*
+ * Display of RX and TX bursts disabled by default
+ */
+uint8_t record_burst_stats;
+
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
-struct vxlan_encap_conf vxlan_encap_conf = {
- .select_ipv4 = 1,
- .select_vlan = 0,
- .select_tos_ttl = 0,
- .vni = "\x00\x00\x00",
- .udp_src = 0,
- .udp_dst = RTE_BE16(4789),
- .ipv4_src = RTE_IPV4(127, 0, 0, 1),
- .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
- .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x11\x11",
- .vlan_tci = 0,
- .ip_tos = 0,
- .ip_ttl = 255,
- .eth_src = "\x00\x00\x00\x00\x00\x00",
- .eth_dst = "\xff\xff\xff\xff\xff\xff",
-};
-
-struct nvgre_encap_conf nvgre_encap_conf = {
- .select_ipv4 = 1,
- .select_vlan = 0,
- .tni = "\x00\x00\x00",
- .ipv4_src = RTE_IPV4(127, 0, 0, 1),
- .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
- .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x01",
- .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x11\x11",
- .vlan_tci = 0,
- .eth_src = "\x00\x00\x00\x00\x00\x00",
- .eth_dst = "\xff\xff\xff\xff\xff\xff",
-};
+/*
+ * Hexadecimal bitmask of the RX multi-queue modes that can be enabled.
+ */
+enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS;
/* Forward function declarations */
static void setup_attached_port(portid_t pi);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
+/* Holds the registered mbuf dynamic flags names. */
+char dynf_names[64][RTE_MBUF_DYN_NAMESIZE];
+
/*
* Helper function to check if socket is already discovered.
* If yes, return positive value. If not, return zero.
}
socket_ids[num_sockets++] = sock_num;
}
- if (i == rte_get_master_lcore())
+ if (i == rte_get_main_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
}
}
}
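+/*
+ * Reserve IOVA-contiguous memzones and build the array of
+ * rte_pktmbuf_extmem descriptors describing them. Returns the number
+ * of descriptors (memzones) created, or 0 on failure.
+ */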
+static unsigned int
+setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
+ char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
+{
+ struct rte_pktmbuf_extmem *xmem;
+ unsigned int ext_num, zone_num, elt_num;
+ uint16_t elt_size;
+
+ elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
+ elt_num = EXTBUF_ZONE_SIZE / elt_size;
+ zone_num = (nb_mbufs + elt_num - 1) / elt_num;
+
+ xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
+ if (xmem == NULL) {
+ TESTPMD_LOG(ERR, "Cannot allocate memory for "
+ "external buffer descriptors\n");
+ *ext_mem = NULL;
+ return 0;
+ }
+ for (ext_num = 0; ext_num < zone_num; ext_num++) {
+ struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int ret;
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ errno = ENAMETOOLONG;
+ ext_num = 0;
+ break;
+ }
+ mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
+ socket_id,
+ RTE_MEMZONE_IOVA_CONTIG |
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY,
+ EXTBUF_ZONE_SIZE);
+ if (mz == NULL) {
+ /*
+ * The caller exits on external buffer creation
+ * error, so there is no need to free memzones.
+ */
+ errno = ENOMEM;
+ ext_num = 0;
+ break;
+ }
+ xseg->buf_ptr = mz->addr;
+ xseg->buf_iova = mz->iova;
+ xseg->buf_len = EXTBUF_ZONE_SIZE;
+ xseg->elt_size = elt_size;
+ }
+ if (ext_num == 0 && xmem != NULL) {
+ free(xmem);
+ xmem = NULL;
+ }
+ *ext_mem = xmem;
+ return ext_num;
+}
+
/*
* Configuration initialisation done once at init time.
*/
static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
- unsigned int socket_id)
+ unsigned int socket_id, uint16_t size_idx)
{
char pool_name[RTE_MEMPOOL_NAMESIZE];
struct rte_mempool *rte_mp = NULL;
uint32_t mb_size;
mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
- mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
TESTPMD_LOG(INFO,
"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
heap_socket);
break;
}
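+	/* Create mbufs with data buffers pinned in external memory. */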
+ case MP_ALLOC_XBUF:
+ {
+ struct rte_pktmbuf_extmem *ext_mem;
+ unsigned int ext_num;
+
+ ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
+ socket_id, pool_name, &ext_mem);
+ if (ext_num == 0)
+ rte_exit(EXIT_FAILURE,
+ "Can't create pinned data buffers\n");
+
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create_extbuf
+ (pool_name, nb_mbuf, mb_mempool_cache,
+ 0, mbuf_seg_size, socket_id,
+ ext_mem, ext_num);
+ free(ext_mem);
+ break;
+ }
default:
{
rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
- queueid_t allowed_max_rxq = MAX_QUEUE_ID;
+ queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
+ bool max_rxq_valid = false;
portid_t pi;
struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pi) {
- rte_eth_dev_info_get(pi, &dev_info);
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_rxq_valid = true;
if (dev_info.max_rx_queues < allowed_max_rxq) {
allowed_max_rxq = dev_info.max_rx_queues;
*pid = pi;
}
}
- return allowed_max_rxq;
+ return max_rxq_valid ? allowed_max_rxq : 0;
}
/*
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
- queueid_t allowed_max_txq = MAX_QUEUE_ID;
+ queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
+ bool max_txq_valid = false;
portid_t pi;
struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pi) {
- rte_eth_dev_info_get(pi, &dev_info);
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_txq_valid = true;
if (dev_info.max_tx_queues < allowed_max_txq) {
allowed_max_txq = dev_info.max_tx_queues;
*pid = pi;
}
}
- return allowed_max_txq;
+ return max_txq_valid ? allowed_max_txq : 0;
}
/*
return 0;
}
+/*
+ * Get the allowed maximum number of RXDs of every rx queue.
+ * *pid returns the port id which has the minimal value of
+ * max_rxd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_max_nb_rxd(portid_t *pid)
+{
+ uint16_t allowed_max_rxd = UINT16_MAX;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
+ allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
+ *pid = pi;
+ }
+ }
+ return allowed_max_rxd;
+}
+
+/*
+ * Get the allowed minimal number of RXDs of every rx queue.
+ * *pid returns the port id which has the largest value of
+ * min_rxd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_min_nb_rxd(portid_t *pid)
+{
+ uint16_t allowed_min_rxd = 0;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
+ allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
+ *pid = pi;
+ }
+ }
+
+ return allowed_min_rxd;
+}
+
+/*
+ * Check if the input rxd is valid or not.
+ * The input rxd is valid if it is not greater than the maximum number
+ * of RXDs of every Rx queue and not less than the minimal number
+ * of RXDs of every Rx queue.
+ * Return 0 if valid, otherwise return -1.
+ */
+int
+check_nb_rxd(queueid_t rxd)
+{
+ uint16_t allowed_max_rxd;
+ uint16_t allowed_min_rxd;
+ portid_t pid = 0;
+
+ allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
+ if (rxd > allowed_max_rxd) {
+ printf("Fail: input rxd (%u) can't be greater "
+ "than max_rxds (%u) of port %u\n",
+ rxd,
+ allowed_max_rxd,
+ pid);
+ return -1;
+ }
+
+ allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
+ if (rxd < allowed_min_rxd) {
+ printf("Fail: input rxd (%u) can't be less "
+ "than min_rxds (%u) of port %u\n",
+ rxd,
+ allowed_min_rxd,
+ pid);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Get the allowed maximum number of TXDs of every tx queue.
+ * *pid returns the port id which has the minimal value of
+ * max_txd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_max_nb_txd(portid_t *pid)
+{
+ uint16_t allowed_max_txd = UINT16_MAX;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
+ allowed_max_txd = dev_info.tx_desc_lim.nb_max;
+ *pid = pi;
+ }
+ }
+ return allowed_max_txd;
+}
+
+/*
+ * Get the allowed minimal number of TXDs of every tx queue.
+ * *pid returns the port id which has the largest value of
+ * min_txd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_min_nb_txd(portid_t *pid)
+{
+ uint16_t allowed_min_txd = 0;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
+ allowed_min_txd = dev_info.tx_desc_lim.nb_min;
+ *pid = pi;
+ }
+ }
+
+ return allowed_min_txd;
+}
+
+/*
+ * Check if the input txd is valid or not.
+ * The input txd is valid if it is not greater than the maximum number
+ * of TXDs of every Tx queue and not less than the minimal number
+ * of TXDs of every Tx queue.
+ * Return 0 if valid, otherwise return -1.
+ */
+int
+check_nb_txd(queueid_t txd)
+{
+ uint16_t allowed_max_txd;
+ uint16_t allowed_min_txd;
+ portid_t pid = 0;
+
+ allowed_max_txd = get_allowed_max_nb_txd(&pid);
+ if (txd > allowed_max_txd) {
+ printf("Fail: input txd (%u) can't be greater "
+ "than max_txds (%u) of port %u\n",
+ txd,
+ allowed_max_txd,
+ pid);
+ return -1;
+ }
+
+ allowed_min_txd = get_allowed_min_nb_txd(&pid);
+ if (txd < allowed_min_txd) {
+ printf("Fail: input txd (%u) can't be less "
+ "than min_txds (%u) of port %u\n",
+ txd,
+ allowed_min_txd,
+ pid);
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * Get the allowed maximum number of hairpin queues.
+ * *pid returns the port id which has the minimal value of
+ * max_hairpin_queues among all ports.
+ */
+queueid_t
+get_allowed_max_nb_hairpinq(portid_t *pid)
+{
+ queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
+ portid_t pi;
+ struct rte_eth_hairpin_cap cap;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
+ *pid = pi;
+ return 0;
+ }
+ if (cap.max_nb_queues < allowed_max_hairpinq) {
+ allowed_max_hairpinq = cap.max_nb_queues;
+ *pid = pi;
+ }
+ }
+ return allowed_max_hairpinq;
+}
+
+/*
+ * Check if the input hairpinq is valid or not.
+ * The input hairpinq is valid if it is not greater than the maximum
+ * number of hairpin queues of any port.
+ * Return 0 if valid, otherwise return -1.
+ */
+int
+check_nb_hairpinq(queueid_t hairpinq)
+{
+ queueid_t allowed_max_hairpinq;
+ portid_t pid = 0;
+
+ allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
+ if (hairpinq > allowed_max_hairpinq) {
+ printf("Fail: input hairpin (%u) can't be greater "
+ "than max_hairpin_queues (%u) of port %u\n",
+ hairpinq, allowed_max_hairpinq, pid);
+ return -1;
+ }
+ return 0;
+}
+
static void
init_config(void)
{
uint16_t data_size;
bool warning = 0;
int k;
+ int ret;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
/* Apply default TxRx configuration for all ports */
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
- rte_eth_dev_info_get(pid, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(pid, &port->dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_info_get() failed\n");
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
- if (!(port->dev_info.tx_offload_capa &
- DEV_TX_OFFLOAD_MATCH_METADATA))
- port->dev_conf.txmode.offloads &=
- ~DEV_TX_OFFLOAD_MATCH_METADATA;
if (numa_support) {
if (port_numa[pid] != NUMA_NO_CONFIG)
port_per_socket[port_numa[pid]]++;
port->dev_info.rx_desc_lim.nb_mtu_seg_max;
if ((data_size + RTE_PKTMBUF_HEADROOM) >
- mbuf_data_size) {
- mbuf_data_size = data_size +
+ mbuf_data_size[0]) {
+ mbuf_data_size[0] = data_size +
RTE_PKTMBUF_HEADROOM;
warning = 1;
}
}
if (warning)
- TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
- mbuf_data_size);
-
+ TESTPMD_LOG(WARNING,
+ "Configured mbuf size of the first segment %hu\n",
+ mbuf_data_size[0]);
/*
* Create pools of mbuf.
* If NUMA support is disabled, create a single pool of mbuf in
}
if (numa_support) {
- uint8_t i;
+ uint8_t i, j;
for (i = 0; i < num_sockets; i++)
- mempools[i] = mbuf_pool_create(mbuf_data_size,
- nb_mbuf_per_pool,
- socket_ids[i]);
+ for (j = 0; j < mbuf_data_size_n; j++)
+ mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+ mbuf_pool_create(mbuf_data_size[j],
+ nb_mbuf_per_pool,
+ socket_ids[i], j);
} else {
- if (socket_num == UMA_NO_CONFIG)
- mempools[0] = mbuf_pool_create(mbuf_data_size,
- nb_mbuf_per_pool, 0);
- else
- mempools[socket_num] = mbuf_pool_create
- (mbuf_data_size,
- nb_mbuf_per_pool,
- socket_num);
+ uint8_t i;
+
+ for (i = 0; i < mbuf_data_size_n; i++)
+ mempools[i] = mbuf_pool_create
+ (mbuf_data_size[i],
+ nb_mbuf_per_pool,
+ socket_num == UMA_NO_CONFIG ?
+ 0 : socket_num, i);
}
init_port_config();
*/
for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
mbp = mbuf_pool_find(
- rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
+ rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
if (mbp == NULL)
- mbp = mbuf_pool_find(0);
+ mbp = mbuf_pool_find(0, 0);
fwd_lcores[lc_id]->mbp = mbp;
/* initialize GSO context */
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
"rte_gro_ctx_create() failed\n");
}
}
-
-#if defined RTE_LIBRTE_PMD_SOFTNIC
- if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
- RTE_ETH_FOREACH_DEV(pid) {
- port = &ports[pid];
- const char *driver = port->dev_info.driver_name;
-
- if (strcmp(driver, "net_softnic") == 0)
- port->softport.fwd_lcore_arg = fwd_lcores;
- }
- }
-#endif
-
}
reconfig(portid_t new_port_id, unsigned socket_id)
{
struct rte_port *port;
+ int ret;
/* Reconfiguration of Ethernet ports. */
port = &ports[new_port_id];
- rte_eth_dev_info_get(new_port_id, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(new_port_id, &port->dev_info);
+ if (ret != 0)
+ return;
/* set flag to initialize port/queue */
port->need_reconfig = 1;
return 0;
}
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
- unsigned int total_burst;
- unsigned int nb_burst;
- unsigned int burst_stats[3];
- uint16_t pktnb_stats[3];
+ uint64_t total_burst, sburst;
+ uint64_t nb_burst;
+ uint64_t burst_stats[4];
+ uint16_t pktnb_stats[4];
uint16_t nb_pkt;
- int burst_percent[3];
+ int burst_percent[4], sburstp;
+ int i;
/*
* First compute the total number of packet bursts and the
* two highest numbers of bursts of the same number of packets.
*/
- total_burst = 0;
- burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
- pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
- for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
+ memset(&burst_stats, 0x0, sizeof(burst_stats));
+ memset(&pktnb_stats, 0x0, sizeof(pktnb_stats));
+
+ /* Show stats for 0 burst size always */
+ total_burst = pbs->pkt_burst_spread[0];
+ burst_stats[0] = pbs->pkt_burst_spread[0];
+ pktnb_stats[0] = 0;
+
+ /* Find the next 2 burst sizes with highest occurrences. */
+ for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
nb_burst = pbs->pkt_burst_spread[nb_pkt];
+
if (nb_burst == 0)
continue;
+
total_burst += nb_burst;
- if (nb_burst > burst_stats[0]) {
- burst_stats[1] = burst_stats[0];
- pktnb_stats[1] = pktnb_stats[0];
- burst_stats[0] = nb_burst;
- pktnb_stats[0] = nb_pkt;
- } else if (nb_burst > burst_stats[1]) {
+
+ if (nb_burst > burst_stats[1]) {
+ burst_stats[2] = burst_stats[1];
+ pktnb_stats[2] = pktnb_stats[1];
burst_stats[1] = nb_burst;
pktnb_stats[1] = nb_pkt;
+ } else if (nb_burst > burst_stats[2]) {
+ burst_stats[2] = nb_burst;
+ pktnb_stats[2] = nb_pkt;
}
}
if (total_burst == 0)
return;
- burst_percent[0] = (burst_stats[0] * 100) / total_burst;
- printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
- burst_percent[0], (int) pktnb_stats[0]);
- if (burst_stats[0] == total_burst) {
- printf("]\n");
- return;
- }
- if (burst_stats[0] + burst_stats[1] == total_burst) {
- printf(" + %d%% of %d pkts]\n",
- 100 - burst_percent[0], pktnb_stats[1]);
- return;
- }
- burst_percent[1] = (burst_stats[1] * 100) / total_burst;
- burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
- if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
- printf(" + %d%% of others]\n", 100 - burst_percent[0]);
- return;
+
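+	/*
+	 * Print the zero-size bucket and the two top burst-size buckets;
+	 * stop once the accumulated count covers all bursts and report
+	 * any remainder as "other".
+	 */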
+ printf(" %s-bursts : %"PRIu64" [", rx_tx, total_burst);
+ for (i = 0, sburst = 0, sburstp = 0; i < 4; i++) {
+ if (i == 3) {
+ printf("%d%% of other]\n", 100 - sburstp);
+ return;
+ }
+
+ sburst += burst_stats[i];
+ if (sburst == total_burst) {
+ printf("%d%% of %d pkts]\n",
+ 100 - sburstp, (int) pktnb_stats[i]);
+ return;
+ }
+
+ burst_percent[i] =
+ (double)burst_stats[i] / total_burst * 100;
+ printf("%d%% of %d pkts + ",
+ burst_percent[i], (int) pktnb_stats[i]);
+ sburstp += burst_percent[i];
}
- printf(" + %d%% of %d pkts + %d%% of others]\n",
- burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
-#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_stream_stats_display(streamid_t stream_id)
printf("\n");
}
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- pkt_burst_stats_display("RX", &fs->rx_burst_stats);
- pkt_burst_stats_display("TX", &fs->tx_burst_stats);
-#endif
+ if (record_burst_stats) {
+ pkt_burst_stats_display("RX", &fs->rx_burst_stats);
+ pkt_burst_stats_display("TX", &fs->tx_burst_stats);
+ }
}
void
uint64_t total_tx_dropped = 0;
uint64_t total_rx_nombuf = 0;
struct rte_eth_stats stats;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
uint64_t fwd_cycles = 0;
-#endif
uint64_t total_recv = 0;
uint64_t total_xmit = 0;
struct rte_port *port;
ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
fs->rx_bad_outer_l4_csum;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- fwd_cycles += fs->core_cycles;
-#endif
+ if (record_core_cycles)
+ fwd_cycles += fs->core_cycles;
}
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
uint8_t j;
stats.opackets + ports_stats[pt_id].tx_dropped);
}
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- if (ports_stats[pt_id].rx_stream)
- pkt_burst_stats_display("RX",
- &ports_stats[pt_id].rx_stream->rx_burst_stats);
- if (ports_stats[pt_id].tx_stream)
- pkt_burst_stats_display("TX",
- &ports_stats[pt_id].tx_stream->tx_burst_stats);
-#endif
+ if (record_burst_stats) {
+ if (ports_stats[pt_id].rx_stream)
+ pkt_burst_stats_display("RX",
+ &ports_stats[pt_id].rx_stream->rx_burst_stats);
+ if (ports_stats[pt_id].tx_stream)
+ pkt_burst_stats_display("TX",
+ &ports_stats[pt_id].tx_stream->tx_burst_stats);
+ }
if (port->rx_queue_stats_mapping_enabled) {
printf("\n");
printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
"%s\n",
acc_stats_border, acc_stats_border);
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- if (total_recv > 0)
- printf("\n CPU cycles/packet=%u (total cycles="
- "%"PRIu64" / total RX packets=%"PRIu64")\n",
- (unsigned int)(fwd_cycles / total_recv),
- fwd_cycles, total_recv);
-#endif
+ if (record_core_cycles) {
+#define CYC_PER_MHZ 1E6
+ if (total_recv > 0 || total_xmit > 0) {
+ uint64_t total_pkts = 0;
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 ||
+ strcmp(cur_fwd_eng->fwd_mode_name, "flowgen") == 0)
+ total_pkts = total_xmit;
+ else
+ total_pkts = total_recv;
+
+ printf("\n CPU cycles/packet=%.2F (total cycles="
+ "%"PRIu64" / total %s packets=%"PRIu64") at %"PRIu64
+ " MHz Clock\n",
+ (double) fwd_cycles / total_pkts,
+ fwd_cycles, cur_fwd_eng->fwd_mode_name, total_pkts,
+ (uint64_t)(rte_get_tsc_hz() / CYC_PER_MHZ));
+ }
+ }
}
void
fs->rx_bad_l4_csum = 0;
fs->rx_bad_outer_l4_csum = 0;
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
-#endif
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
fs->core_cycles = 0;
-#endif
}
}
struct fwd_stream **fsm;
streamid_t nb_fs;
streamid_t sm_id;
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
uint64_t tics_per_1sec;
uint64_t tics_datum;
uint64_t tics_current;
do {
for (sm_id = 0; sm_id < nb_fs; sm_id++)
(*pkt_fwd)(fsm[sm_id]);
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
if (bitrate_enabled != 0 &&
bitrate_lcore_id == rte_lcore_id()) {
tics_current = rte_rdtsc();
}
}
#endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
if (latencystats_enabled != 0 &&
latencystats_lcore_id == rte_lcore_id())
rte_latencystats_update();
return 1;
}
+/* Configure the Rx and Tx hairpin queues for the selected port. */
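+/*
+ * Bits of hairpin_mode used below:
+ *   0x10 - explicit Tx flow mode (sets hairpin_conf.tx_explicit)
+ *   0x02 - hairpin ports are bound in pairs
+ *   0x01 - hairpin ports are bound in a chain, the first port peering
+ *          with the last one
+ *   none - each port hairpins to itself
+ */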
+static int
+setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
+{
+ queueid_t qi;
+ struct rte_eth_hairpin_conf hairpin_conf = {
+ .peer_count = 1,
+ };
+ int i;
+ int diag;
+ struct rte_port *port = &ports[pi];
+ uint16_t peer_rx_port = pi;
+ uint16_t peer_tx_port = pi;
+ uint32_t manual = 1;
+ uint32_t tx_exp = hairpin_mode & 0x10;
+
+ if (!(hairpin_mode & 0xf)) {
+ peer_rx_port = pi;
+ peer_tx_port = pi;
+ manual = 0;
+ } else if (hairpin_mode & 0x1) {
+ peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
+ RTE_ETH_DEV_NO_OWNER);
+ if (peer_tx_port >= RTE_MAX_ETHPORTS)
+ peer_tx_port = rte_eth_find_next_owned_by(0,
+ RTE_ETH_DEV_NO_OWNER);
+ if (p_pi != RTE_MAX_ETHPORTS) {
+ peer_rx_port = p_pi;
+ } else {
+ uint16_t next_pi;
+
+ /* Last port will be the peer RX port of the first. */
+ RTE_ETH_FOREACH_DEV(next_pi)
+ peer_rx_port = next_pi;
+ }
+ manual = 1;
+ } else if (hairpin_mode & 0x2) {
+ if (cnt_pi & 0x1) {
+ peer_rx_port = p_pi;
+ } else {
+ peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
+ RTE_ETH_DEV_NO_OWNER);
+ if (peer_rx_port >= RTE_MAX_ETHPORTS)
+ peer_rx_port = pi;
+ }
+ peer_tx_port = peer_rx_port;
+ manual = 1;
+ }
+
+ for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
+ hairpin_conf.peers[0].port = peer_rx_port;
+ hairpin_conf.peers[0].queue = i + nb_rxq;
+ hairpin_conf.manual_bind = !!manual;
+ hairpin_conf.tx_explicit = !!tx_exp;
+ diag = rte_eth_tx_hairpin_queue_setup
+ (pi, qi, nb_txd, &hairpin_conf);
+ i++;
+ if (diag == 0)
+ continue;
+
+		/* Failed to set up Tx hairpin queue, return */
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING,
+ RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back "
+ "to stopped\n", pi);
+ printf("Fail to configure port %d hairpin "
+ "queues\n", pi);
+ /* try to reconfigure queues next time */
+ port->need_reconfig_queues = 1;
+ return -1;
+ }
+ for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
+ hairpin_conf.peers[0].port = peer_tx_port;
+ hairpin_conf.peers[0].queue = i + nb_txq;
+ hairpin_conf.manual_bind = !!manual;
+ hairpin_conf.tx_explicit = !!tx_exp;
+ diag = rte_eth_rx_hairpin_queue_setup
+ (pi, qi, nb_rxd, &hairpin_conf);
+ i++;
+ if (diag == 0)
+ continue;
+
+		/* Failed to set up Rx hairpin queue, return */
+ if (rte_atomic16_cmpset(&(port->port_status),
+ RTE_PORT_HANDLING,
+ RTE_PORT_STOPPED) == 0)
+ printf("Port %d can not be set back "
+ "to stopped\n", pi);
+ printf("Fail to configure port %d hairpin "
+ "queues\n", pi);
+ /* try to reconfigure queues next time */
+ port->need_reconfig_queues = 1;
+ return -1;
+ }
+ return 0;
+}
+
+/* Configure the Rx with optional split. */
+int
+rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
+{
+ union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+ unsigned int i, mp_n;
+ int ret;
+
+ if (rx_pkt_nb_segs <= 1 ||
+ (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
+ nb_rx_desc, socket_id,
+ rx_conf, mp);
+ return ret;
+ }
+ for (i = 0; i < rx_pkt_nb_segs; i++) {
+ struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+ struct rte_mempool *mpx;
+		/*
+		 * Use the last valid pool for the segments whose index
+		 * exceeds the number of configured pools.
+		 */
+		mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+ mpx = mbuf_pool_find(socket_id, mp_n);
+ /* Handle zero as mbuf data buffer size. */
+ rx_seg->length = rx_pkt_seg_lengths[i] ?
+ rx_pkt_seg_lengths[i] :
+ mbuf_data_size[mp_n];
+ rx_seg->offset = i < rx_pkt_nb_offs ?
+ rx_pkt_seg_offsets[i] : 0;
+ rx_seg->mp = mpx ? mpx : mp;
+ }
+ rx_conf->rx_nseg = rx_pkt_nb_segs;
+ rx_conf->rx_seg = rx_useg;
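+	/* Per-segment pools are used, so no single mempool is passed. */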
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, NULL);
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ return ret;
+}
+
int
start_port(portid_t pid)
{
int diag, need_check_link_status = -1;
portid_t pi;
+ portid_t p_pi = RTE_MAX_ETHPORTS;
+ portid_t pl[RTE_MAX_ETHPORTS];
+ portid_t peer_pl[RTE_MAX_ETHPORTS];
+ uint16_t cnt_pi = 0;
+ uint16_t cfg_pi = 0;
+ int peer_pi;
queueid_t qi;
struct rte_port *port;
struct rte_ether_addr mac_addr;
+ struct rte_eth_hairpin_cap cap;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
configure_rxtx_dump_callbacks(0);
printf("Configuring Port %d (socket %u)\n", pi,
port->socket_id);
+ if (nb_hairpinq > 0 &&
+ rte_eth_dev_hairpin_capability_get(pi, &cap)) {
+ printf("Port %d doesn't support hairpin "
+ "queues\n", pi);
+ return -1;
+ }
/* configure port */
- diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
- &(port->dev_conf));
+ diag = rte_eth_dev_configure(pi, nb_rxq + nb_hairpinq,
+ nb_txq + nb_hairpinq,
+ &(port->dev_conf));
if (diag != 0) {
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
struct rte_mempool * mp =
- mbuf_pool_find(rxring_numa[pi]);
+ mbuf_pool_find
+ (rxring_numa[pi], 0);
if (mp == NULL) {
printf("Failed to setup RX queue:"
"No mempool allocation"
return -1;
}
- diag = rte_eth_rx_queue_setup(pi, qi,
+ diag = rx_queue_setup(pi, qi,
port->nb_rx_desc[qi],
rxring_numa[pi],
&(port->rx_conf[qi]),
mp);
} else {
struct rte_mempool *mp =
- mbuf_pool_find(port->socket_id);
+ mbuf_pool_find
+ (port->socket_id, 0);
if (mp == NULL) {
printf("Failed to setup RX queue:"
"No mempool allocation"
port->socket_id);
return -1;
}
- diag = rte_eth_rx_queue_setup(pi, qi,
+ diag = rx_queue_setup(pi, qi,
port->nb_rx_desc[qi],
port->socket_id,
&(port->rx_conf[qi]),
port->need_reconfig_queues = 1;
return -1;
}
+ /* setup hairpin queues */
+ if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
+ return -1;
}
configure_rxtx_dump_callbacks(verbose_level);
+ if (clear_ptypes) {
+ diag = rte_eth_dev_set_ptypes(pi, RTE_PTYPE_UNKNOWN,
+ NULL, 0);
+ if (diag < 0)
+ printf(
+ "Port %d: Failed to disable Ptype parsing\n",
+ pi);
+ }
+
+ p_pi = pi;
+ cnt_pi++;
+
/* start port */
if (rte_eth_dev_start(pi) < 0) {
printf("Fail to start port %d\n", pi);
RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
printf("Port %d can not be set into started\n", pi);
- rte_eth_macaddr_get(pi, &mac_addr);
- printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
+ if (eth_macaddr_get_print_err(pi, &mac_addr) == 0)
+ printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
/* at least one port started, need checking link status */
need_check_link_status = 1;
+
+ pl[cfg_pi++] = pi;
}
if (need_check_link_status == 1 && !no_link_check)
else if (need_check_link_status == 0)
printf("Please stop the ports first\n");
+ if (hairpin_mode & 0xf) {
+ uint16_t i;
+ int j;
+
+ /* bind all started hairpin ports */
+ for (i = 0; i < cfg_pi; i++) {
+ pi = pl[i];
+ /* bind current Tx to all peer Rx */
+ peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+ RTE_MAX_ETHPORTS, 1);
+ if (peer_pi < 0)
+ return peer_pi;
+ for (j = 0; j < peer_pi; j++) {
+ if (!port_is_started(peer_pl[j]))
+ continue;
+ diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
+ if (diag < 0) {
+ printf("Error during binding hairpin"
+ " Tx port %u to %u: %s\n",
+ pi, peer_pl[j],
+ rte_strerror(-diag));
+ return -1;
+ }
+ }
+ /* bind all peer Tx to current Rx */
+ peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+ RTE_MAX_ETHPORTS, 0);
+ if (peer_pi < 0)
+ return peer_pi;
+ for (j = 0; j < peer_pi; j++) {
+ if (!port_is_started(peer_pl[j]))
+ continue;
+ diag = rte_eth_hairpin_bind(peer_pl[j], pi);
+ if (diag < 0) {
+ printf("Error during binding hairpin"
+ " Tx port %u to %u: %s\n",
+ peer_pl[j], pi,
+ rte_strerror(-diag));
+ return -1;
+ }
+ }
+ }
+ }
+
printf("Done\n");
return 0;
}
portid_t pi;
struct rte_port *port;
int need_check_link_status = 0;
+ portid_t peer_pl[RTE_MAX_ETHPORTS];
+ int peer_pi;
if (dcb_test) {
dcb_test = 0;
RTE_PORT_HANDLING) == 0)
continue;
- rte_eth_dev_stop(pi);
+ if (hairpin_mode & 0xf) {
+ int j;
+
+ rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
+ /* unbind all peer Tx from current Rx */
+ peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+ RTE_MAX_ETHPORTS, 0);
+ if (peer_pi < 0)
+ continue;
+ for (j = 0; j < peer_pi; j++) {
+ if (!port_is_started(peer_pl[j]))
+ continue;
+ rte_eth_hairpin_unbind(peer_pl[j], pi);
+ }
+ }
+
+ if (rte_eth_dev_stop(pi) != 0)
+ RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n",
+ pi);
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
continue;
}
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
- printf("Port %d is now not stopped\n", pi);
- continue;
- }
-
- if (port->flow_list)
- port_flow_flush(pi);
+ port_flow_flush(pi);
rte_eth_dev_close(pi);
-
- remove_invalid_ports();
-
- if (rte_atomic16_cmpset(&(port->port_status),
- RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
- printf("Port %d cannot be set to closed\n", pi);
}
+ remove_invalid_ports();
printf("Done\n");
}
if (port_id_is_invalid(pid, ENABLED_WARN))
return;
+ if ((pid == (portid_t)RTE_PORT_ALL && !all_ports_stopped()) ||
+ (pid != (portid_t)RTE_PORT_ALL && !port_is_stopped(pid))) {
+ printf("Can not reset port(s), please stop port(s) first.\n");
+ return;
+ }
+
printf("Resetting ports...\n");
RTE_ETH_FOREACH_DEV(pi) {
return;
}
- if (rte_dev_probe(identifier) != 0) {
+ if (rte_dev_probe(identifier) < 0) {
TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
return;
}
setup_attached_port(portid_t pi)
{
unsigned int socket_id;
+ int ret;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
/* if socket_id is invalid, set to the first available socket. */
if (check_socket_id(socket_id) < 0)
socket_id = socket_ids[0];
reconfig(pi, socket_id);
- rte_eth_promiscuous_enable(pi);
+ ret = rte_eth_promiscuous_enable(pi);
+ if (ret != 0)
+ printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
+ pi, rte_strerror(-ret));
ports_ids[nb_ports++] = pi;
fwd_ports_ids[nb_fwd_ports++] = pi;
printf("Done\n");
}
-void
-detach_port_device(portid_t port_id)
+static void
+detach_device(struct rte_device *dev)
{
- struct rte_device *dev;
portid_t sibling;
- printf("Removing a device...\n");
-
- dev = rte_eth_devices[port_id].device;
if (dev == NULL) {
printf("Device already removed\n");
return;
}
+ printf("Removing a device...\n");
+
+ RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
+ if (ports[sibling].port_status != RTE_PORT_CLOSED) {
+ if (ports[sibling].port_status != RTE_PORT_STOPPED) {
+ printf("Port %u not stopped\n", sibling);
+ return;
+ }
+ port_flow_flush(sibling);
+ }
+ }
+
+ if (rte_dev_remove(dev) < 0) {
+ TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
+ return;
+ }
+ remove_invalid_ports();
+
+ printf("Device is detached\n");
+ printf("Now total ports is %d\n", nb_ports);
+ printf("Done\n");
+ return;
+}
+
+void
+detach_port_device(portid_t port_id)
+{
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return;
+
if (ports[port_id].port_status != RTE_PORT_CLOSED) {
if (ports[port_id].port_status != RTE_PORT_STOPPED) {
printf("Port not stopped\n");
return;
}
printf("Port was not closed\n");
- if (ports[port_id].flow_list)
- port_flow_flush(port_id);
}
- if (rte_dev_remove(dev) != 0) {
- TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
+ detach_device(rte_eth_devices[port_id].device);
+}
+
+void
+detach_devargs(char *identifier)
+{
+ struct rte_dev_iterator iterator;
+ struct rte_devargs da;
+ portid_t port_id;
+
+ printf("Removing a device...\n");
+
+ memset(&da, 0, sizeof(da));
+ if (rte_devargs_parsef(&da, "%s", identifier)) {
+ printf("cannot parse identifier\n");
+ if (da.args)
+ free(da.args);
return;
}
- RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
- /* reset mapping between old ports and removed device */
- rte_eth_devices[sibling].device = NULL;
- if (ports[sibling].port_status != RTE_PORT_CLOSED) {
- /* sibling ports are forced to be closed */
- ports[sibling].port_status = RTE_PORT_CLOSED;
- printf("Port %u is closed\n", sibling);
+
+ RTE_ETH_FOREACH_MATCHING_DEV(port_id, identifier, &iterator) {
+ if (ports[port_id].port_status != RTE_PORT_CLOSED) {
+ if (ports[port_id].port_status != RTE_PORT_STOPPED) {
+ printf("Port %u not stopped\n", port_id);
+ rte_eth_iterator_cleanup(&iterator);
+ return;
+ }
+ port_flow_flush(port_id);
}
}
+ if (rte_eal_hotplug_remove(da.bus->name, da.name) != 0) {
+ TESTPMD_LOG(ERR, "Failed to detach device %s(%s)\n",
+ da.name, da.bus->name);
+ return;
+ }
+
remove_invalid_ports();
- printf("Device of port %u is detached\n", port_id);
+ printf("Device %s is detached\n", identifier);
printf("Now total ports is %d\n", nb_ports);
printf("Done\n");
- return;
}
void
pmd_test_exit(void)
{
portid_t pt_id;
+ unsigned int i;
int ret;
- int i;
if (test_done == 0)
stop_packet_forwarding();
- for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
if (mempools[i]) {
if (mp_alloc_type == MP_ALLOC_ANON)
rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
return;
}
}
- for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
if (mempools[i])
rte_mempool_free(mempools[i]);
}
cmd_func_t cmd_func;
};
-#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
-
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
portid_t portid;
uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
+ int ret;
+ char link_status[RTE_ETH_LINK_MAX_STR_LEN];
printf("Checking link statuses...\n");
fflush(stdout);
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait(portid, &link);
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
/* print link status if flag set */
if (print_flag == 1) {
- if (link.link_status)
- printf(
- "Port%d Link Up. speed %u Mbps- %s\n",
- portid, link.link_speed,
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
- ("full-duplex") : ("half-duplex\n"));
- else
- printf("Port %d Link Down\n", portid);
+ rte_eth_link_to_str(link_status,
+ sizeof(link_status), &link);
+ printf("Port %d %s\n", portid, link_status);
continue;
}
/* clear all_ports_up flag if any link down */
}
}
-/*
- * This callback is for remove a port for a device. It has limitation because
- * it is not for multiple port removal for a device.
- * TODO: the device detach invoke will plan to be removed from user side to
- * eal. And convert all PMDs to free port resources on ether device closing.
- */
static void
rmv_port_callback(void *arg)
{
int need_to_start = 0;
int org_no_link_check = no_link_check;
portid_t port_id = (intptr_t)arg;
+ struct rte_device *dev;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
no_link_check = 1;
stop_port(port_id);
no_link_check = org_no_link_check;
+
+ /* Save rte_device pointer before closing ethdev port */
+ dev = rte_eth_devices[port_id].device;
close_port(port_id);
- detach_port_device(port_id);
+ detach_device(dev); /* might be already removed or have more ports */
+
if (need_to_start)
start_packet_forwarding(0);
}
rmv_port_callback, (void *)(intptr_t)port_id))
fprintf(stderr, "Could not set up deferred device removal\n");
break;
+ case RTE_ETH_EVENT_DESTROY:
+ ports[port_id].port_status = RTE_PORT_CLOSED;
+ printf("Port %u is closed\n", port_id);
+ break;
default:
break;
}
for (qid = 0; qid < nb_rxq; qid++) {
offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
- port->rx_conf[qid].offloads |= offloads;
+ if (offloads != 0)
+ port->rx_conf[qid].offloads = offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
for (qid = 0; qid < nb_txq; qid++) {
offloads = port->tx_conf[qid].offloads;
port->tx_conf[qid] = port->dev_info.default_txconf;
- port->tx_conf[qid].offloads |= offloads;
+ if (offloads != 0)
+ port->tx_conf[qid].offloads = offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
{
portid_t pid;
struct rte_port *port;
+ int ret;
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
- rte_eth_dev_info_get(pid, &port->dev_info);
+
+ ret = eth_dev_info_get_print_err(pid, &port->dev_info);
+ if (ret != 0)
+ return;
+
if (nb_rxq > 1) {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
if (port->dcb_flag == 0) {
if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
- port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+ port->dev_conf.rxmode.mq_mode =
+ (enum rte_eth_rx_mq_mode)
+ (rx_mq_mode & ETH_MQ_RX_RSS);
else
port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
}
rxtx_port_config(port);
- rte_eth_macaddr_get(pid, &port->eth_addr);
+ ret = eth_macaddr_get_print_err(pid, &port->eth_addr);
+ if (ret != 0)
+ return;
map_port_queue_stats_mapping_registers(pid, port);
-#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
rte_pmd_ixgbe_bypass_init(pid);
#endif
}
/* set DCB mode of RX and TX of multiple queues */
- eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
+ eth_conf->rxmode.mq_mode =
+ (enum rte_eth_rx_mq_mode)
+ (rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
} else {
struct rte_eth_dcb_rx_conf *rx_conf =
struct rte_eth_dcb_tx_conf *tx_conf =
ð_conf->tx_adv_conf.dcb_tx_conf;
+ memset(&rss_conf, 0, sizeof(struct rte_eth_rss_conf));
+
rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
if (rc != 0)
return rc;
tx_conf->dcb_tc[i] = i % num_tcs;
}
- eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+ eth_conf->rxmode.mq_mode =
+ (enum rte_eth_rx_mq_mode)
+ (rx_mq_mode & ETH_MQ_RX_DCB_RSS);
eth_conf->rx_adv_conf.rss_conf = rss_conf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
}
retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
if (retval < 0)
return retval;
- rte_eth_dev_info_get(pid, &rte_port->dev_info);
+
+ retval = eth_dev_info_get_print_err(pid, &rte_port->dev_info);
+ if (retval != 0)
+ return retval;
/* If dev_info.vmdq_pool_base is greater than 0,
* the queue id of vmdq pools is started after pf queues.
for (i = 0; i < RTE_DIM(vlan_tags); i++)
rx_vft_set(pid, vlan_tags[i], 1);
- rte_eth_macaddr_get(pid, &rte_port->eth_addr);
+ retval = eth_macaddr_get_print_err(pid, &rte_port->eth_addr);
+ if (retval != 0)
+ return retval;
+
map_port_queue_stats_mapping_registers(pid, rte_port);
rte_port->dcb_flag = 1;
static void
init_port(void)
{
+ int i;
+
/* Configuration of Ethernet ports. */
ports = rte_zmalloc("testpmd: ports",
sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
-
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+ LIST_INIT(&ports[i].flow_tunnel_list);
/* Initialize ports NUMA structures */
memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
if (signum == SIGINT || signum == SIGTERM) {
printf("\nSignal %d received, preparing to exit...\n",
signum);
-#ifdef RTE_LIBRTE_PDUMP
+#ifdef RTE_LIB_PDUMP
/* uninitialize packet capture framework */
rte_pdump_uninit();
#endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
- rte_latencystats_uninit();
+#ifdef RTE_LIB_LATENCYSTATS
+ if (latencystats_enabled != 0)
+ rte_latencystats_uninit();
#endif
force_quit();
/* Set flag to indicate the force termination. */
signal(SIGINT, signal_handler);
signal(SIGTERM, signal_handler);
- diag = rte_eal_init(argc, argv);
- if (diag < 0)
- rte_panic("Cannot init EAL\n");
-
testpmd_logtype = rte_log_register("testpmd");
if (testpmd_logtype < 0)
- rte_panic("Cannot register log type");
+ rte_exit(EXIT_FAILURE, "Cannot register log type");
rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
+ diag = rte_eal_init(argc, argv);
+ if (diag < 0)
+ rte_exit(EXIT_FAILURE, "Cannot init EAL: %s\n",
+ rte_strerror(rte_errno));
+
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
+ rte_exit(EXIT_FAILURE,
+ "Secondary process type not supported.\n");
+
ret = register_eth_event_callback();
if (ret != 0)
- rte_panic("Cannot register for ethdev events");
+ rte_exit(EXIT_FAILURE, "Cannot register for ethdev events");
-#ifdef RTE_LIBRTE_PDUMP
+#ifdef RTE_LIB_PDUMP
/* initialize packet capture framework */
rte_pdump_init();
#endif
set_def_fwd_config();
if (nb_lcores == 0)
- rte_panic("Empty set of forwarding logical cores - check the "
- "core mask supplied in the command parameters\n");
+ rte_exit(EXIT_FAILURE, "No cores defined for forwarding\n"
+ "Check the core mask argument\n");
/* Bitrate/latency stats disabled by default */
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
bitrate_enabled = 0;
#endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
latencystats_enabled = 0;
#endif
}
}
- if (start_port(RTE_PORT_ALL) != 0)
+ if (!no_device_start && start_port(RTE_PORT_ALL) != 0)
rte_exit(EXIT_FAILURE, "Start ports failed\n");
/* set all ports to promiscuous mode by default */
- RTE_ETH_FOREACH_DEV(port_id)
- rte_eth_promiscuous_enable(port_id);
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ret = rte_eth_promiscuous_enable(port_id);
+ if (ret != 0)
+ printf("Error during enabling promiscuous mode for port %u: %s - ignore\n",
+ port_id, rte_strerror(-ret));
+ }
/* Init metrics library */
rte_metrics_init(rte_socket_id());
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
if (latencystats_enabled != 0) {
int ret = rte_latencystats_init(1, NULL);
if (ret)
#endif
/* Setup bitrate stats */
-#ifdef RTE_LIBRTE_BITRATE
+#ifdef RTE_LIB_BITRATESTATS
if (bitrate_enabled != 0) {
bitrate_data = rte_stats_bitrate_create();
if (bitrate_data == NULL)
}
#endif
-#ifdef RTE_LIBRTE_CMDLINE
+#ifdef RTE_LIB_CMDLINE
if (strlen(cmdline_filename) != 0)
cmdline_read_from_file(cmdline_filename);
return 1;
}
- return 0;
+ ret = rte_eal_cleanup();
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "EAL cleanup failed: %s\n", strerror(-ret));
+
+ return EXIT_SUCCESS;
}