diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 97ae52e17e..e1da961311 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -36,7 +36,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -55,7 +54,9 @@
 #include 
 #endif
 #include 
+#ifdef RTE_LIB_METRICS
 #include <rte_metrics.h>
+#endif
 #ifdef RTE_LIB_BITRATESTATS
 #include <rte_bitrate.h>
 #endif
@@ -83,7 +84,13 @@
 #endif
 
 #define EXTMEM_HEAP_NAME "extmem"
-#define EXTBUF_ZONE_SIZE RTE_PGSIZE_2M
+/*
+ * Zone size with the malloc overhead (max of debug and release variants)
+ * must fit into the smallest supported hugepage size (2M),
+ * so that an IOVA-contiguous zone of this size can always be allocated
+ * if there are free 2M hugepages.
+ */
+#define EXTBUF_ZONE_SIZE (RTE_PGSIZE_2M - 4 * RTE_CACHE_LINE_SIZE)
 
 uint16_t verbose_level = 0; /**< Silent by default. */
 int testpmd_logtype; /**< Log type for testpmd logs */
@@ -188,6 +195,7 @@ struct fwd_engine * fwd_engines[] = {
 #ifdef RTE_LIBRTE_IEEE1588
 	&ieee1588_fwd_engine,
 #endif
+	&shared_rxq_engine,
 	NULL,
 };
 
@@ -208,12 +216,22 @@ uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                       * specified on command-line. */
 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
 
+/** Extended statistics to show. */
+struct rte_eth_xstat_name *xstats_display;
+
+unsigned int xstats_display_num; /**< Size of extended statistics to show */
+
 /*
  * In a container, the process running with 'stats-period' cannot terminate
  * on its own. Set a flag to exit the stats period loop on SIGINT/SIGTERM.
  */
 uint8_t f_quit;
 
+/*
+ * Max Rx frame size, set by '--max-pkt-len' parameter.
+ */
+uint32_t max_rx_pkt_len;
+
 /*
  * Configuration of packet segments used to scatter received packets
  * if any of the split features is configured.
@@ -338,7 +356,7 @@ uint64_t noisy_lkup_num_reads_writes;
 /*
  * Receive Side Scaling (RSS) configuration.
  */
-uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
+uint64_t rss_hf = RTE_ETH_RSS_IP; /* RSS IP by default. */
 
 /*
  * Port topology configuration
@@ -437,7 +455,7 @@ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
 uint8_t latencystats_enabled;
 
 /*
- * Lcore ID to serive latency statistics.
+ * Lcore ID to service latency statistics.
  */
 lcoreid_t latencystats_lcore_id = -1;
 
@@ -446,21 +464,15 @@ lcoreid_t latencystats_lcore_id = -1;
 /*
  * Ethernet device configuration.
  */
-struct rte_eth_rxmode rx_mode = {
-	/* Default maximum frame length.
-	 * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead"
-	 * in init_config().
-	 */
-	.max_rx_pkt_len = 0,
-};
+struct rte_eth_rxmode rx_mode;
 
 struct rte_eth_txmode tx_mode = {
-	.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE,
+	.offloads = RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE,
 };
 
-struct rte_fdir_conf fdir_conf = {
+struct rte_eth_fdir_conf fdir_conf = {
 	.mode = RTE_FDIR_MODE_NONE,
-	.pballoc = RTE_FDIR_PBALLOC_64K,
+	.pballoc = RTE_ETH_FDIR_PBALLOC_64K,
 	.status = RTE_FDIR_REPORT_STATUS,
 	.mask = {
 		.vlan_tci_mask = 0xFFEF,
@@ -498,6 +510,11 @@ uint8_t record_core_cycles;
  */
 uint8_t record_burst_stats;
 
+/*
+ * Number of ports per shared Rx queue group, 0 to disable.
+ */ +uint32_t rxq_share; + unsigned int num_sockets = 0; unsigned int socket_ids[RTE_MAX_NUMA_NODES]; @@ -508,13 +525,15 @@ lcoreid_t bitrate_lcore_id; uint8_t bitrate_enabled; #endif +#ifdef RTE_LIB_GRO struct gro_status gro_ports[RTE_MAX_ETHPORTS]; uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; +#endif /* * hexadecimal bitmask of RX mq mode can be enabled. */ -enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; +enum rte_eth_rx_mq_mode rx_mq_mode = RTE_ETH_MQ_RX_VMDQ_DCB_RSS; /* * Used to set forced link speed @@ -533,6 +552,41 @@ int proc_id; */ unsigned int num_procs = 1; +static void +eth_rx_metadata_negotiate_mp(uint16_t port_id) +{ + uint64_t rx_meta_features = 0; + int ret; + + if (!is_proc_primary()) + return; + + rx_meta_features |= RTE_ETH_RX_METADATA_USER_FLAG; + rx_meta_features |= RTE_ETH_RX_METADATA_USER_MARK; + rx_meta_features |= RTE_ETH_RX_METADATA_TUNNEL_ID; + + ret = rte_eth_rx_metadata_negotiate(port_id, &rx_meta_features); + if (ret == 0) { + if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_FLAG)) { + TESTPMD_LOG(DEBUG, "Flow action FLAG will not affect Rx mbufs on port %u\n", + port_id); + } + + if (!(rx_meta_features & RTE_ETH_RX_METADATA_USER_MARK)) { + TESTPMD_LOG(DEBUG, "Flow action MARK will not affect Rx mbufs on port %u\n", + port_id); + } + + if (!(rx_meta_features & RTE_ETH_RX_METADATA_TUNNEL_ID)) { + TESTPMD_LOG(DEBUG, "Flow tunnel offload support might be limited or unavailable on port %u\n", + port_id); + } + } else if (ret != -ENOTSUP) { + rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port %u: %s\n", + port_id, rte_strerror(-ret)); + } +} + static int eth_dev_configure_mp(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, const struct rte_eth_conf *dev_conf) @@ -586,6 +640,7 @@ static int eth_event_callback(portid_t port_id, static void dev_event_callback(const char *device_name, enum rte_dev_event_type type, void *param); +static void fill_xstats_display_info(void); /* * Check if all the ports are started. @@ -593,12 +648,15 @@ static void dev_event_callback(const char *device_name, */ static int all_ports_started(void); +#ifdef RTE_LIB_GSO struct gso_status gso_ports[RTE_MAX_ETHPORTS]; uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; +#endif /* Holds the registered mbuf dynamic flags names. */ char dynf_names[64][RTE_MBUF_DYN_NAMESIZE]; + /* * Helper function to check if socket is already discovered. * If yes, return positive value. If not, return zero. 
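
Note on the Rx metadata hunk above: rte_eth_rx_metadata_negotiate() must be
called before rte_eth_dev_configure(), which is why the patch invokes it from
init_config_port_offloads() further below. A minimal single-process sketch of
the same pattern (the helper name negotiate_rx_metadata is hypothetical, not
part of this patch):

#include <errno.h>
#include <stdio.h>
#include <rte_ethdev.h>

static int
negotiate_rx_metadata(uint16_t port_id)
{
	/* Request FLAG, MARK and tunnel ID metadata delivery in Rx mbufs. */
	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK |
			    RTE_ETH_RX_METADATA_TUNNEL_ID;
	int ret;

	/* The PMD clears the bits it cannot honour and leaves the agreed
	 * subset in 'features'; -ENOTSUP only means "nothing to negotiate". */
	ret = rte_eth_rx_metadata_negotiate(port_id, &features);
	if (ret != 0 && ret != -ENOTSUP)
		return ret;

	if (ret == 0 && !(features & RTE_ETH_RX_METADATA_USER_MARK))
		fprintf(stderr, "MARK will not be delivered on port %u\n",
			port_id);
	return 0;
}

Treating -ENOTSUP as success matches the hunk above: a PMD without the
negotiation callback may still deliver metadata unconditionally.
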
@@ -1009,12 +1067,11 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id, ext_num = 0; break; } - mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE, - socket_id, - RTE_MEMZONE_IOVA_CONTIG | - RTE_MEMZONE_1GB | - RTE_MEMZONE_SIZE_HINT_ONLY, - EXTBUF_ZONE_SIZE); + mz = rte_memzone_reserve(mz_name, EXTBUF_ZONE_SIZE, + socket_id, + RTE_MEMZONE_IOVA_CONTIG | + RTE_MEMZONE_1GB | + RTE_MEMZONE_SIZE_HINT_ONLY); if (mz == NULL) { /* * The caller exits on external buffer creation @@ -1481,14 +1538,29 @@ check_nb_hairpinq(queueid_t hairpinq) return 0; } +static int +get_eth_overhead(struct rte_eth_dev_info *dev_info) +{ + uint32_t eth_overhead; + + if (dev_info->max_mtu != UINT16_MAX && + dev_info->max_rx_pktlen > dev_info->max_mtu) + eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu; + else + eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + return eth_overhead; +} + static void init_config_port_offloads(portid_t pid, uint32_t socket_id) { struct rte_port *port = &ports[pid]; - uint16_t data_size; int ret; int i; + eth_rx_metadata_negotiate_mp(pid); + port->dev_conf.txmode = tx_mode; port->dev_conf.rxmode = rx_mode; @@ -1496,15 +1568,9 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) if (ret != 0) rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); - ret = update_jumbo_frame_offload(pid); - if (ret != 0) - fprintf(stderr, - "Updating jumbo frame offload failed for port %u\n", - pid); - - if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + if (!(port->dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)) port->dev_conf.txmode.offloads &= - ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; + ~RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; /* Apply Rx offloads configuration */ for (i = 0; i < port->dev_info.max_rx_queues; i++) @@ -1516,6 +1582,10 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) if (eth_link_speed) port->dev_conf.link_speeds = eth_link_speed; + if (max_rx_pkt_len) + port->dev_conf.rxmode.mtu = max_rx_pkt_len - + get_eth_overhead(&port->dev_info); + /* set flag to initialize port/queue */ port->need_reconfig = 1; port->need_reconfig_queues = 1; @@ -1528,14 +1598,20 @@ init_config_port_offloads(portid_t pid, uint32_t socket_id) */ if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { - data_size = rx_mode.max_rx_pkt_len / - port->dev_info.rx_desc_lim.nb_mtu_seg_max; - - if ((data_size + RTE_PKTMBUF_HEADROOM) > mbuf_data_size[0]) { - mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM; - TESTPMD_LOG(WARNING, - "Configured mbuf size of the first segment %hu\n", - mbuf_data_size[0]); + uint32_t eth_overhead = get_eth_overhead(&port->dev_info); + uint16_t mtu; + + if (rte_eth_dev_get_mtu(pid, &mtu) == 0) { + uint16_t data_size = (mtu + eth_overhead) / + port->dev_info.rx_desc_lim.nb_mtu_seg_max; + uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM; + + if (buffer_size > mbuf_data_size[0]) { + mbuf_data_size[0] = buffer_size; + TESTPMD_LOG(WARNING, + "Configured mbuf size of the first segment %hu\n", + mbuf_data_size[0]); + } } } } @@ -1547,8 +1623,12 @@ init_config(void) struct rte_mempool *mbp; unsigned int nb_mbuf_per_pool; lcoreid_t lc_id; +#ifdef RTE_LIB_GRO struct rte_gro_param gro_param; +#endif +#ifdef RTE_LIB_GSO uint32_t gso_types; +#endif /* Configuration of logical cores. 
*/ fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", @@ -1631,8 +1711,10 @@ init_config(void) init_port_config(); - gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; +#ifdef RTE_LIB_GSO + gso_types = RTE_ETH_TX_OFFLOAD_TCP_TSO | RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | + RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO | RTE_ETH_TX_OFFLOAD_UDP_TSO; +#endif /* * Records which Mbuf pool to use by each logical core, if needed. */ @@ -1643,6 +1725,7 @@ init_config(void) if (mbp == NULL) mbp = mbuf_pool_find(0, 0); fwd_lcores[lc_id]->mbp = mbp; +#ifdef RTE_LIB_GSO /* initialize GSO context */ fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp; @@ -1650,10 +1733,12 @@ init_config(void) fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN; fwd_lcores[lc_id]->gso_ctx.flag = 0; +#endif } fwd_config_setup(); +#ifdef RTE_LIB_GRO /* create a gro context for each lcore */ gro_param.gro_types = RTE_GRO_TCP_IPV4; gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES; @@ -1667,6 +1752,7 @@ init_config(void) "rte_gro_ctx_create() failed\n"); } } +#endif } @@ -1793,7 +1879,7 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) pktnb_stats[0] = 0; /* Find the next 2 burst sizes with highest occurrences. */ - for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST; nb_pkt++) { + for (nb_pkt = 1; nb_pkt < MAX_PKT_BURST + 1; nb_pkt++) { nb_burst = pbs->pkt_burst_spread[nb_pkt]; if (nb_burst == 0) @@ -2172,16 +2258,10 @@ run_one_txonly_burst_on_core(void *fwd_arg) static void launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore) { - port_fwd_begin_t port_fwd_begin; unsigned int i; unsigned int lc_id; int diag; - port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; - if (port_fwd_begin != NULL) { - for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) - (*port_fwd_begin)(fwd_ports_ids[i]); - } for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) { lc_id = fwd_lcores_cpuids[i]; if ((interactive == 0) || (lc_id != rte_lcore_id())) { @@ -2227,23 +2307,46 @@ start_packet_forwarding(int with_tx_first) fprintf(stderr, "Packet forwarding already started\n"); return; } - test_done = 0; fwd_config_setup(); + pkt_fwd_config_display(&cur_fwd_config); + if (!pkt_fwd_shared_rxq_check()) + return; + + port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + if (port_fwd_begin(fwd_ports_ids[i])) { + fprintf(stderr, + "Packet forwarding is not ready\n"); + return; + } + } + } + + if (with_tx_first) { + port_fwd_begin = tx_only_engine.port_fwd_begin; + if (port_fwd_begin != NULL) { + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + if (port_fwd_begin(fwd_ports_ids[i])) { + fprintf(stderr, + "Packet forwarding is not ready\n"); + return; + } + } + } + } + + test_done = 0; + if(!no_flush_rx) flush_fwd_rx_queues(); - pkt_fwd_config_display(&cur_fwd_config); rxtx_config_display(); fwd_stats_reset(); if (with_tx_first) { - port_fwd_begin = tx_only_engine.port_fwd_begin; - if (port_fwd_begin != NULL) { - for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) - (*port_fwd_begin)(fwd_ports_ids[i]); - } while (with_tx_first--) { launch_packet_forwarding( run_one_txonly_burst_on_core); @@ -2418,9 +2521,9 @@ setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) continue; /* Fail to setup rx queue, return */ - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, - RTE_PORT_STOPPED) == 0) + if (port->port_status == 
RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); fprintf(stderr, "Fail to configure port %d hairpin queues\n", @@ -2441,9 +2544,9 @@ setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) continue; /* Fail to setup rx queue, return */ - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, - RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); fprintf(stderr, "Fail to configure port %d hairpin queues\n", @@ -2500,6 +2603,108 @@ rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, return ret; } +static int +alloc_xstats_display_info(portid_t pi) +{ + uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp; + uint64_t **prev_values = &ports[pi].xstats_info.prev_values; + uint64_t **curr_values = &ports[pi].xstats_info.curr_values; + + if (xstats_display_num == 0) + return 0; + + *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp)); + if (*ids_supp == NULL) + goto fail_ids_supp; + + *prev_values = calloc(xstats_display_num, + sizeof(**prev_values)); + if (*prev_values == NULL) + goto fail_prev_values; + + *curr_values = calloc(xstats_display_num, + sizeof(**curr_values)); + if (*curr_values == NULL) + goto fail_curr_values; + + ports[pi].xstats_info.allocated = true; + + return 0; + +fail_curr_values: + free(*prev_values); +fail_prev_values: + free(*ids_supp); +fail_ids_supp: + return -ENOMEM; +} + +static void +free_xstats_display_info(portid_t pi) +{ + if (!ports[pi].xstats_info.allocated) + return; + free(ports[pi].xstats_info.ids_supp); + free(ports[pi].xstats_info.prev_values); + free(ports[pi].xstats_info.curr_values); + ports[pi].xstats_info.allocated = false; +} + +/** Fill helper structures for specified port to show extended statistics. */ +static void +fill_xstats_display_info_for_port(portid_t pi) +{ + unsigned int stat, stat_supp; + const char *xstat_name; + struct rte_port *port; + uint64_t *ids_supp; + int rc; + + if (xstats_display_num == 0) + return; + + if (pi == (portid_t)RTE_PORT_ALL) { + fill_xstats_display_info(); + return; + } + + port = &ports[pi]; + if (port->port_status != RTE_PORT_STARTED) + return; + + if (!port->xstats_info.allocated && alloc_xstats_display_info(pi) != 0) + rte_exit(EXIT_FAILURE, + "Failed to allocate xstats display memory\n"); + + ids_supp = port->xstats_info.ids_supp; + for (stat = stat_supp = 0; stat < xstats_display_num; stat++) { + xstat_name = xstats_display[stat].name; + rc = rte_eth_xstats_get_id_by_name(pi, xstat_name, + ids_supp + stat_supp); + if (rc != 0) { + fprintf(stderr, "No xstat '%s' on port %u - skip it %u\n", + xstat_name, pi, stat); + continue; + } + stat_supp++; + } + + port->xstats_info.ids_supp_sz = stat_supp; +} + +/** Fill helper structures for all ports to show extended statistics. 
*/ +static void +fill_xstats_display_info(void) +{ + portid_t pi; + + if (xstats_display_num == 0) + return; + + RTE_ETH_FOREACH_DEV(pi) + fill_xstats_display_info_for_port(pi); +} + int start_port(portid_t pid) { @@ -2524,13 +2729,17 @@ start_port(portid_t pid) need_check_link_status = 0; port = &ports[pi]; - if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED, - RTE_PORT_HANDLING) == 0) { + if (port->port_status == RTE_PORT_STOPPED) + port->port_status = RTE_PORT_HANDLING; + else { fprintf(stderr, "Port %d is now not stopped\n", pi); continue; } if (port->need_reconfig > 0) { + struct rte_eth_conf dev_conf; + int k; + port->need_reconfig = 0; if (flow_isolate_all) { @@ -2552,13 +2761,15 @@ start_port(portid_t pid) pi); return -1; } + /* configure port */ diag = eth_dev_configure_mp(pi, nb_rxq + nb_hairpinq, nb_txq + nb_hairpinq, &(port->dev_conf)); if (diag != 0) { - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); @@ -2568,6 +2779,36 @@ start_port(portid_t pid) port->need_reconfig = 1; return -1; } + /* get device configuration*/ + if (0 != + eth_dev_conf_get_print_err(pi, &dev_conf)) { + fprintf(stderr, + "port %d can not get device configuration\n", + pi); + return -1; + } + /* Apply Rx offloads configuration */ + if (dev_conf.rxmode.offloads != + port->dev_conf.rxmode.offloads) { + port->dev_conf.rxmode.offloads |= + dev_conf.rxmode.offloads; + for (k = 0; + k < port->dev_info.max_rx_queues; + k++) + port->rx_conf[k].offloads |= + dev_conf.rxmode.offloads; + } + /* Apply Tx offloads configuration */ + if (dev_conf.txmode.offloads != + port->dev_conf.txmode.offloads) { + port->dev_conf.txmode.offloads |= + dev_conf.txmode.offloads; + for (k = 0; + k < port->dev_info.max_tx_queues; + k++) + port->tx_conf[k].offloads |= + dev_conf.txmode.offloads; + } } if (port->need_reconfig_queues > 0 && is_proc_primary()) { port->need_reconfig_queues = 0; @@ -2589,9 +2830,9 @@ start_port(portid_t pid) continue; /* Fail to setup tx queue, return */ - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, - RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); @@ -2641,9 +2882,9 @@ start_port(portid_t pid) continue; /* Fail to setup rx queue, return */ - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, - RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); @@ -2678,16 +2919,18 @@ start_port(portid_t pid) pi, rte_strerror(-diag)); /* Fail to setup rx queue, return */ - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set back to stopped\n", pi); continue; } - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STARTED; + else fprintf(stderr, "Port %d can not be set into started\n", pi); @@ -2750,6 +2993,8 @@ start_port(portid_t pid) } } + fill_xstats_display_info_for_port(pid); + printf("Done\n"); return 0; } @@ -2787,8 +3032,9 @@ 
stop_port(portid_t pid) } port = &ports[pi]; - if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED, - RTE_PORT_HANDLING) == 0) + if (port->port_status == RTE_PORT_STARTED) + port->port_status = RTE_PORT_HANDLING; + else continue; if (hairpin_mode & 0xf) { @@ -2814,8 +3060,9 @@ stop_port(portid_t pid) RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", pi); - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) + if (port->port_status == RTE_PORT_HANDLING) + port->port_status = RTE_PORT_STOPPED; + else fprintf(stderr, "Port %d can not be set into stopped\n", pi); need_check_link_status = 1; @@ -2878,16 +3125,18 @@ close_port(portid_t pid) } port = &ports[pi]; - if (rte_atomic16_cmpset(&(port->port_status), - RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) { + if (port->port_status == RTE_PORT_CLOSED) { fprintf(stderr, "Port %d is already closed\n", pi); continue; } if (is_proc_primary()) { port_flow_flush(pi); + port_flex_item_flush(pi); rte_eth_dev_close(pi); } + + free_xstats_display_info(pi); } remove_invalid_ports(); @@ -3174,6 +3423,7 @@ pmd_test_exit(void) if (mempools[i]) mempool_free_mp(mempools[i]); } + free(xstats_display); printf("\nBye...\n"); } @@ -3221,7 +3471,7 @@ check_all_ports_link_status(uint32_t port_mask) continue; } /* clear all_ports_up flag if any link down */ - if (link.link_status == ETH_LINK_DOWN) { + if (link.link_status == RTE_ETH_LINK_DOWN) { all_ports_up = 0; break; } @@ -3393,14 +3643,23 @@ dev_event_callback(const char *device_name, enum rte_dev_event_type type, } static void -rxtx_port_config(struct rte_port *port) +rxtx_port_config(portid_t pid) { uint16_t qid; uint64_t offloads; + struct rte_port *port = &ports[pid]; for (qid = 0; qid < nb_rxq; qid++) { offloads = port->rx_conf[qid].offloads; port->rx_conf[qid] = port->dev_info.default_rxconf; + + if (rxq_share > 0 && + (port->dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) { + /* Non-zero share group to enable RxQ share. */ + port->rx_conf[qid].share_group = pid / rxq_share + 1; + port->rx_conf[qid].share_qid = qid; /* Equal mapping. */ + } + if (offloads != 0) port->rx_conf[qid].offloads = offloads; @@ -3450,77 +3709,39 @@ rxtx_port_config(struct rte_port *port) } /* - * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload, - * MTU is also aligned if JUMBO_FRAME offload is not set. + * Helper function to set MTU from frame size * * port->dev_info should be set before calling this function. 
* * return 0 on success, negative on error */ int -update_jumbo_frame_offload(portid_t portid) +update_mtu_from_frame_size(portid_t portid, uint32_t max_rx_pktlen) { struct rte_port *port = &ports[portid]; uint32_t eth_overhead; - uint64_t rx_offloads; - int ret; - bool on; - - /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */ - if (port->dev_info.max_mtu != UINT16_MAX && - port->dev_info.max_rx_pktlen > port->dev_info.max_mtu) - eth_overhead = port->dev_info.max_rx_pktlen - - port->dev_info.max_mtu; - else - eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; - - rx_offloads = port->dev_conf.rxmode.offloads; + uint16_t mtu, new_mtu; - /* Default config value is 0 to use PMD specific overhead */ - if (port->dev_conf.rxmode.max_rx_pkt_len == 0) - port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead; + eth_overhead = get_eth_overhead(&port->dev_info); - if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) { - rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; - on = false; - } else { - if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { - fprintf(stderr, - "Frame size (%u) is not supported by port %u\n", - port->dev_conf.rxmode.max_rx_pkt_len, - portid); - return -1; - } - rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; - on = true; + if (rte_eth_dev_get_mtu(portid, &mtu) != 0) { + printf("Failed to get MTU for port %u\n", portid); + return -1; } - if (rx_offloads != port->dev_conf.rxmode.offloads) { - uint16_t qid; + new_mtu = max_rx_pktlen - eth_overhead; - port->dev_conf.rxmode.offloads = rx_offloads; + if (mtu == new_mtu) + return 0; - /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */ - for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) { - if (on) - port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; - else - port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; - } + if (eth_dev_set_mtu_mp(portid, new_mtu) != 0) { + fprintf(stderr, + "Failed to set MTU to %u for port %u\n", + new_mtu, portid); + return -1; } - /* If JUMBO_FRAME is set MTU conversion done by ethdev layer, - * if unset do it here - */ - if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { - ret = eth_dev_set_mtu_mp(portid, - port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead); - if (ret) - fprintf(stderr, - "Failed to set MTU to %u for port %u\n", - port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead, - portid); - } + port->dev_conf.rxmode.mtu = new_mtu; return 0; } @@ -3530,7 +3751,7 @@ init_port_config(void) { portid_t pid; struct rte_port *port; - int ret; + int ret, i; RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; @@ -3550,15 +3771,24 @@ init_port_config(void) } if (port->dcb_flag == 0) { - if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) + if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0) { port->dev_conf.rxmode.mq_mode = (enum rte_eth_rx_mq_mode) - (rx_mq_mode & ETH_MQ_RX_RSS); - else - port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE; + (rx_mq_mode & RTE_ETH_MQ_RX_RSS); + } else { + port->dev_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_NONE; + port->dev_conf.rxmode.offloads &= + ~RTE_ETH_RX_OFFLOAD_RSS_HASH; + + for (i = 0; + i < port->dev_info.nb_rx_queues; + i++) + port->rx_conf[i].offloads &= + ~RTE_ETH_RX_OFFLOAD_RSS_HASH; + } } - rxtx_port_config(port); + rxtx_port_config(pid); ret = eth_macaddr_get_print_err(pid, &port->eth_addr); if (ret != 0) @@ -3642,9 +3872,9 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf, vmdq_rx_conf->enable_default_pool = 0; vmdq_rx_conf->default_pool = 0; 
 		vmdq_rx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 		vmdq_tx_conf->nb_queue_pools =
-			(num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+			(num_tcs == RTE_ETH_4_TCS ? RTE_ETH_32_POOLS : RTE_ETH_16_POOLS);
 
 		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
 		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
@@ -3652,7 +3882,7 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 			vmdq_rx_conf->pool_map[i].pools =
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
 			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
@@ -3660,8 +3890,8 @@
 		/* set DCB mode of RX and TX of multiple queues */
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_VMDQ_DCB);
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+					(rx_mq_mode & RTE_ETH_MQ_RX_VMDQ_DCB);
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_VMDQ_DCB;
 	} else {
 		struct rte_eth_dcb_rx_conf *rx_conf =
 				&eth_conf->rx_adv_conf.dcb_rx_conf;
@@ -3677,23 +3907,23 @@ get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+		for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		eth_conf->rxmode.mq_mode =
 				(enum rte_eth_rx_mq_mode)
-					(rx_mq_mode & ETH_MQ_RX_DCB_RSS);
+					(rx_mq_mode & RTE_ETH_MQ_RX_DCB_RSS);
 		eth_conf->rx_adv_conf.rss_conf = rss_conf;
-		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
+		eth_conf->txmode.mq_mode = RTE_ETH_MQ_TX_DCB;
 	}
 
 	if (pfc_en)
 		eth_conf->dcb_capability_en =
-			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+			RTE_ETH_DCB_PG_SUPPORT | RTE_ETH_DCB_PFC_SUPPORT;
 	else
-		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+		eth_conf->dcb_capability_en = RTE_ETH_DCB_PG_SUPPORT;
 
 	return 0;
 }
@@ -3715,16 +3945,21 @@ init_port_dcb_config(portid_t pid,
 	}
 	rte_port = &ports[pid];
 
-	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
-
-	port_conf.rxmode = rte_port->dev_conf.rxmode;
-	port_conf.txmode = rte_port->dev_conf.txmode;
+	/* retain the original device configuration. */
+	memcpy(&port_conf, &rte_port->dev_conf, sizeof(struct rte_eth_conf));
 
 	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
 	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+	/* remove RSS HASH offload for DCB in vt mode */
+	if (port_conf.rxmode.mq_mode == RTE_ETH_MQ_RX_VMDQ_DCB) {
+		port_conf.rxmode.offloads &= ~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+		for (i = 0; i < nb_rxq; i++)
+			rte_port->rx_conf[i].offloads &=
+				~RTE_ETH_RX_OFFLOAD_RSS_HASH;
+	}
 
 	/* re-configure the device . 
*/ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf); @@ -3772,9 +4007,9 @@ init_port_dcb_config(portid_t pid, memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf)); - rxtx_port_config(rte_port); + rxtx_port_config(pid); /* VLAN filter */ - rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER; + rte_port->dev_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER; for (i = 0; i < RTE_DIM(vlan_tags); i++) rx_vft_set(pid, vlan_tags[i], 1); @@ -3804,6 +4039,8 @@ init_port(void) "rte_zmalloc(%d struct rte_port) failed\n", RTE_MAX_ETHPORTS); } + for (i = 0; i < RTE_MAX_ETHPORTS; i++) + ports[i].xstats_info.allocated = false; for (i = 0; i < RTE_MAX_ETHPORTS; i++) LIST_INIT(&ports[i].flow_tunnel_list); /* Initialize ports NUMA structures */ @@ -3992,8 +4229,10 @@ main(int argc, char** argv) port_id, rte_strerror(-ret)); } +#ifdef RTE_LIB_METRICS /* Init metrics library */ rte_metrics_init(rte_socket_id()); +#endif #ifdef RTE_LIB_LATENCYSTATS if (latencystats_enabled != 0) { @@ -4017,7 +4256,6 @@ main(int argc, char** argv) rte_stats_bitrate_reg(bitrate_data); } #endif - #ifdef RTE_LIB_CMDLINE if (strlen(cmdline_filename) != 0) cmdline_read_from_file(cmdline_filename);
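
Two follow-up sketches distilled from the hunks above; the helper names are
hypothetical and only illustrate the patterns the patch applies.

First, the frame-size handling: with the JUMBO_FRAME offload and
max_rx_pkt_len gone, '--max-pkt-len' is translated into an MTU using the
PMD-reported overhead, falling back to Ethernet header plus CRC, as
get_eth_overhead() does:

#include <stdint.h>
#include <rte_ethdev.h>

static uint16_t
frame_size_to_mtu(const struct rte_eth_dev_info *dev_info, uint32_t frame_size)
{
	uint32_t overhead;

	/* Overhead implied by the PMD limits, else Ethernet header + CRC. */
	if (dev_info->max_mtu != UINT16_MAX &&
	    dev_info->max_rx_pktlen > dev_info->max_mtu)
		overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
	else
		overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; /* 14 + 4 */

	/* e.g. --max-pkt-len=1518 with the 18-byte default yields MTU 1500. */
	return (uint16_t)(frame_size - overhead);
}

Second, the shared Rx queue mapping applied in rxtx_port_config(): ports are
grouped by the rxq_share divisor and queues map 1:1 within a group, but only
when the PMD advertises the capability:

static void
apply_rxq_share(uint16_t port_id, uint16_t queue_id, uint32_t rxq_share,
		struct rte_eth_rxconf *rxconf)
{
	struct rte_eth_dev_info dev_info;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	/* share_group 0 means no sharing, hence the "+ 1" below. */
	if (rxq_share > 0 &&
	    (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE)) {
		rxconf->share_group = port_id / rxq_share + 1;
		rxconf->share_qid = queue_id; /* equal mapping across ports */
	}
}

With rxq_share == 2, ports 0-1 land in share group 1 and ports 2-3 in group 2,
matching the "Non-zero share group to enable RxQ share" comment above. The
xstats display helpers follow the same resolve-once idea: after
fill_xstats_display_info_for_port() has mapped names to IDs, each refresh is a
single rte_eth_xstats_get_by_id(port_id, ids_supp, values, ids_supp_sz) call
per port.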