X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftestpmd.c;h=98c3248c017f28e10c443ed836006d2da6c86afd;hb=92533e9dfe7501a64b54260420cf00ece7352056;hp=ccba71c07610330a3c5ac55cfe26b06a7b008c9a;hpb=2a449871a12dacef6a644254e42175e09a316617;p=dpdk.git

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index ccba71c076..98c3248c01 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -46,18 +46,18 @@
 #include <rte_ethdev.h>
 #include <rte_dev.h>
 #include <rte_string_fns.h>
-#ifdef RTE_LIBRTE_IXGBE_PMD
+#ifdef RTE_NET_IXGBE
 #include <rte_pmd_ixgbe.h>
 #endif
-#ifdef RTE_LIBRTE_PDUMP
+#ifdef RTE_LIB_PDUMP
 #include <rte_pdump.h>
 #endif
 #include <rte_flow.h>
 #include <rte_metrics.h>
-#ifdef RTE_LIBRTE_BITRATESTATS
+#ifdef RTE_LIB_BITRATESTATS
 #include <rte_bitrate.h>
 #endif
-#ifdef RTE_LIBRTE_LATENCY_STATS
+#ifdef RTE_LIB_LATENCYSTATS
 #include <rte_latencystats.h>
 #endif
 
@@ -83,7 +83,7 @@ uint16_t verbose_level = 0; /**< Silent by default. */
 int testpmd_logtype; /**< Log type for testpmd logs */
 
-/* use master core for command line ? */
+/* use main core for command line ? */
 uint8_t interactive = 0;
 uint8_t auto_start = 0;
 uint8_t tx_first;
@@ -186,7 +186,7 @@ struct fwd_engine * fwd_engines[] = {
 	NULL,
 };
 
-struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
 uint16_t mempool_flags;
 
 struct fwd_config cur_fwd_config;
@@ -195,7 +195,10 @@ uint32_t retry_enabled;
 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
 
-uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
+uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
+	DEFAULT_MBUF_DATA_SIZE
+}; /**< Mbuf data space size. */
 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
 				      * specified on command-line. */
 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -206,6 +209,15 @@ uint16_t stats_period; /**< Period to show statistics (disabled by default) */
  */
 uint8_t f_quit;
 
+/*
+ * Configuration of packet segments used to scatter received packets
+ * if some of split features is configured.
+ */
+uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
+uint8_t rx_pkt_nb_segs; /**< Number of segments to split */
+uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
+uint8_t rx_pkt_nb_offs; /**< Number of specified offsets */
+
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
  */
@@ -228,6 +240,7 @@ uint32_t tx_pkt_times_intra;
 /**< Timings for send scheduling in TXONLY mode, time between packets. */
 
 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
+uint16_t nb_pkt_flowgen_clones; /**< Number of Tx packet clones to send in flowgen mode. */
 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
 
 /* current configuration is in DCB or not,0 means it is not in DCB mode */
@@ -367,6 +380,9 @@ bool setup_on_probe_event = true;
 /* Clear ptypes on port initialization. */
 uint8_t clear_ptypes = true;
 
+/* Hairpin ports configuration mode. */
+uint16_t hairpin_mode;
+
 /* Pretty printing of ethdev events */
 static const char * const eth_event_desc[] = {
 	[RTE_ETH_EVENT_UNKNOWN] = "unknown",
@@ -404,13 +420,13 @@ int do_mlockall = 0;
  * NIC bypass mode configuration options.
  */
 
-#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
+#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS
 /* The NIC bypass watchdog timeout.
*/ uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF; #endif -#ifdef RTE_LIBRTE_LATENCY_STATS +#ifdef RTE_LIB_LATENCYSTATS /* * Set when latency stats is enabled in the commandline @@ -428,8 +444,11 @@ lcoreid_t latencystats_lcore_id = -1; * Ethernet device configuration. */ struct rte_eth_rxmode rx_mode = { - .max_rx_pkt_len = RTE_ETHER_MAX_LEN, - /**< Default maximum frame length. */ + /* Default maximum frame length. + * Zero is converted to "RTE_ETHER_MTU + PMD Ethernet overhead" + * in init_config(). + */ + .max_rx_pkt_len = 0, }; struct rte_eth_txmode tx_mode = { @@ -461,15 +480,6 @@ struct rte_fdir_conf fdir_conf = { volatile int test_done = 1; /* stop packet forwarding when set to 1. */ -struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS]; -struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS]; - -struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array; -struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array; - -uint16_t nb_tx_queue_stats_mappings = 0; -uint16_t nb_rx_queue_stats_mappings = 0; - /* * Display zero values by default for xstats */ @@ -488,7 +498,7 @@ uint8_t record_burst_stats; unsigned int num_sockets = 0; unsigned int socket_ids[RTE_MAX_NUMA_NODES]; -#ifdef RTE_LIBRTE_BITRATESTATS +#ifdef RTE_LIB_BITRATESTATS /* Bitrate statistics */ struct rte_stats_bitrates *bitrate_data; lcoreid_t bitrate_lcore_id; @@ -503,10 +513,13 @@ uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; */ enum rte_eth_rx_mq_mode rx_mq_mode = ETH_MQ_RX_VMDQ_DCB_RSS; +/* + * Used to set forced link speed + */ +uint32_t eth_link_speed; + /* Forward function declarations */ static void setup_attached_port(portid_t pi); -static void map_port_queue_stats_mapping_registers(portid_t pi, - struct rte_port *port); static void check_all_ports_link_status(uint32_t port_mask); static int eth_event_callback(portid_t port_id, enum rte_eth_event_type type, @@ -566,7 +579,7 @@ set_default_fwd_lcores_config(void) } socket_ids[num_sockets++] = sock_num; } - if (i == rte_get_master_lcore()) + if (i == rte_get_main_lcore()) continue; fwd_lcores_cpuids[nb_lc++] = i; } @@ -955,14 +968,14 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id, */ static struct rte_mempool * mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, - unsigned int socket_id) + unsigned int socket_id, uint16_t size_idx) { char pool_name[RTE_MEMPOOL_NAMESIZE]; struct rte_mempool *rte_mp = NULL; uint32_t mb_size; mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; - mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); + mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx); TESTPMD_LOG(INFO, "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", @@ -1442,6 +1455,11 @@ init_config(void) rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n"); + ret = update_jumbo_frame_offload(pid); + if (ret != 0) + printf("Updating jumbo frame offload failed for port %u\n", + pid); + if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) port->dev_conf.txmode.offloads &= @@ -1471,6 +1489,9 @@ init_config(void) port->tx_conf[k].offloads = port->dev_conf.txmode.offloads; + if (eth_link_speed) + port->dev_conf.link_speeds = eth_link_speed; + /* set flag to initialize port/queue */ port->need_reconfig = 1; port->need_reconfig_queues = 1; @@ -1485,8 +1506,8 @@ init_config(void) port->dev_info.rx_desc_lim.nb_mtu_seg_max; if ((data_size + RTE_PKTMBUF_HEADROOM) > - 
mbuf_data_size) { - mbuf_data_size = data_size + + mbuf_data_size[0]) { + mbuf_data_size[0] = data_size + RTE_PKTMBUF_HEADROOM; warning = 1; } @@ -1494,9 +1515,9 @@ init_config(void) } if (warning) - TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n", - mbuf_data_size); - + TESTPMD_LOG(WARNING, + "Configured mbuf size of the first segment %hu\n", + mbuf_data_size[0]); /* * Create pools of mbuf. * If NUMA support is disabled, create a single pool of mbuf in @@ -1516,21 +1537,23 @@ init_config(void) } if (numa_support) { - uint8_t i; + uint8_t i, j; for (i = 0; i < num_sockets; i++) - mempools[i] = mbuf_pool_create(mbuf_data_size, - nb_mbuf_per_pool, - socket_ids[i]); + for (j = 0; j < mbuf_data_size_n; j++) + mempools[i * MAX_SEGS_BUFFER_SPLIT + j] = + mbuf_pool_create(mbuf_data_size[j], + nb_mbuf_per_pool, + socket_ids[i], j); } else { - if (socket_num == UMA_NO_CONFIG) - mempools[0] = mbuf_pool_create(mbuf_data_size, - nb_mbuf_per_pool, 0); - else - mempools[socket_num] = mbuf_pool_create - (mbuf_data_size, - nb_mbuf_per_pool, - socket_num); + uint8_t i; + + for (i = 0; i < mbuf_data_size_n; i++) + mempools[i] = mbuf_pool_create + (mbuf_data_size[i], + nb_mbuf_per_pool, + socket_num == UMA_NO_CONFIG ? + 0 : socket_num, i); } init_port_config(); @@ -1542,10 +1565,10 @@ init_config(void) */ for (lc_id = 0; lc_id < nb_lcores; lc_id++) { mbp = mbuf_pool_find( - rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id])); + rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0); if (mbp == NULL) - mbp = mbuf_pool_find(0); + mbp = mbuf_pool_find(0, 0); fwd_lcores[lc_id]->mbp = mbp; /* initialize GSO context */ fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp; @@ -1781,6 +1804,8 @@ fwd_stream_stats_display(streamid_t stream_id) " Rx- bad outer L4 checksum: %-14"PRIu64"\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, fs->rx_bad_outer_l4_csum); + printf(" RX- bad outer IP checksum: %-14"PRIu64"\n", + fs->rx_bad_outer_ip_csum); } else { printf("\n"); } @@ -1803,6 +1828,7 @@ fwd_stats_display(void) uint64_t rx_bad_ip_csum; uint64_t rx_bad_l4_csum; uint64_t rx_bad_outer_l4_csum; + uint64_t rx_bad_outer_ip_csum; } ports_stats[RTE_MAX_ETHPORTS]; uint64_t total_rx_dropped = 0; uint64_t total_tx_dropped = 0; @@ -1835,13 +1861,13 @@ fwd_stats_display(void) ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; ports_stats[fs->rx_port].rx_bad_outer_l4_csum += fs->rx_bad_outer_l4_csum; + ports_stats[fs->rx_port].rx_bad_outer_ip_csum += + fs->rx_bad_outer_ip_csum; if (record_core_cycles) fwd_cycles += fs->core_cycles; } for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { - uint8_t j; - pt_id = fwd_ports_ids[i]; port = &ports[pt_id]; @@ -1864,88 +1890,37 @@ fwd_stats_display(void) printf("\n %s Forward statistics for port %-2d %s\n", fwd_stats_border, pt_id, fwd_stats_border); - if (!port->rx_queue_stats_mapping_enabled && - !port->tx_queue_stats_mapping_enabled) { - printf(" RX-packets: %-14"PRIu64 - " RX-dropped: %-14"PRIu64 - "RX-total: %-"PRIu64"\n", - stats.ipackets, stats.imissed, - stats.ipackets + stats.imissed); - - if (cur_fwd_eng == &csum_fwd_engine) - printf(" Bad-ipcsum: %-14"PRIu64 - " Bad-l4csum: %-14"PRIu64 - "Bad-outer-l4csum: %-14"PRIu64"\n", - ports_stats[pt_id].rx_bad_ip_csum, - ports_stats[pt_id].rx_bad_l4_csum, - ports_stats[pt_id].rx_bad_outer_l4_csum); - if (stats.ierrors + stats.rx_nombuf > 0) { - printf(" RX-error: %-"PRIu64"\n", - stats.ierrors); - printf(" RX-nombufs: %-14"PRIu64"\n", - stats.rx_nombuf); - } + printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64 + "RX-total: %-"PRIu64"\n", 
stats.ipackets, stats.imissed, + stats.ipackets + stats.imissed); - printf(" TX-packets: %-14"PRIu64 - " TX-dropped: %-14"PRIu64 - "TX-total: %-"PRIu64"\n", - stats.opackets, ports_stats[pt_id].tx_dropped, - stats.opackets + ports_stats[pt_id].tx_dropped); - } else { - printf(" RX-packets: %14"PRIu64 - " RX-dropped:%14"PRIu64 - " RX-total:%14"PRIu64"\n", - stats.ipackets, stats.imissed, - stats.ipackets + stats.imissed); - - if (cur_fwd_eng == &csum_fwd_engine) - printf(" Bad-ipcsum:%14"PRIu64 - " Bad-l4csum:%14"PRIu64 - " Bad-outer-l4csum: %-14"PRIu64"\n", - ports_stats[pt_id].rx_bad_ip_csum, - ports_stats[pt_id].rx_bad_l4_csum, - ports_stats[pt_id].rx_bad_outer_l4_csum); - if ((stats.ierrors + stats.rx_nombuf) > 0) { - printf(" RX-error:%"PRIu64"\n", stats.ierrors); - printf(" RX-nombufs: %14"PRIu64"\n", - stats.rx_nombuf); - } - - printf(" TX-packets: %14"PRIu64 - " TX-dropped:%14"PRIu64 - " TX-total:%14"PRIu64"\n", - stats.opackets, ports_stats[pt_id].tx_dropped, - stats.opackets + ports_stats[pt_id].tx_dropped); + if (cur_fwd_eng == &csum_fwd_engine) { + printf(" Bad-ipcsum: %-14"PRIu64 + " Bad-l4csum: %-14"PRIu64 + "Bad-outer-l4csum: %-14"PRIu64"\n", + ports_stats[pt_id].rx_bad_ip_csum, + ports_stats[pt_id].rx_bad_l4_csum, + ports_stats[pt_id].rx_bad_outer_l4_csum); + printf(" Bad-outer-ipcsum: %-14"PRIu64"\n", + ports_stats[pt_id].rx_bad_outer_ip_csum); + } + if (stats.ierrors + stats.rx_nombuf > 0) { + printf(" RX-error: %-"PRIu64"\n", stats.ierrors); + printf(" RX-nombufs: %-14"PRIu64"\n", stats.rx_nombuf); } + printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64 + "TX-total: %-"PRIu64"\n", + stats.opackets, ports_stats[pt_id].tx_dropped, + stats.opackets + ports_stats[pt_id].tx_dropped); + if (record_burst_stats) { if (ports_stats[pt_id].rx_stream) pkt_burst_stats_display("RX", &ports_stats[pt_id].rx_stream->rx_burst_stats); if (ports_stats[pt_id].tx_stream) pkt_burst_stats_display("TX", - &ports_stats[pt_id].tx_stream->tx_burst_stats); - } - - if (port->rx_queue_stats_mapping_enabled) { - printf("\n"); - for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { - printf(" Stats reg %2d RX-packets:%14"PRIu64 - " RX-errors:%14"PRIu64 - " RX-bytes:%14"PRIu64"\n", - j, stats.q_ipackets[j], - stats.q_errors[j], stats.q_ibytes[j]); - } - printf("\n"); - } - if (port->tx_queue_stats_mapping_enabled) { - for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { - printf(" Stats reg %2d TX-packets:%14"PRIu64 - " TX-bytes:%14" - PRIu64"\n", - j, stats.q_opackets[j], - stats.q_obytes[j]); - } + &ports_stats[pt_id].tx_stream->tx_burst_stats); } printf(" %s--------------------------------%s\n", @@ -2006,6 +1981,7 @@ fwd_stats_reset(void) fs->rx_bad_ip_csum = 0; fs->rx_bad_l4_csum = 0; fs->rx_bad_outer_l4_csum = 0; + fs->rx_bad_outer_ip_csum = 0; memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); @@ -2064,7 +2040,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) struct fwd_stream **fsm; streamid_t nb_fs; streamid_t sm_id; -#ifdef RTE_LIBRTE_BITRATESTATS +#ifdef RTE_LIB_BITRATESTATS uint64_t tics_per_1sec; uint64_t tics_datum; uint64_t tics_current; @@ -2079,7 +2055,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) do { for (sm_id = 0; sm_id < nb_fs; sm_id++) (*pkt_fwd)(fsm[sm_id]); -#ifdef RTE_LIBRTE_BITRATESTATS +#ifdef RTE_LIB_BITRATESTATS if (bitrate_enabled != 0 && bitrate_lcore_id == rte_lcore_id()) { tics_current = rte_rdtsc(); @@ -2092,7 +2068,7 @@ run_pkt_fwd_on_lcore(struct fwd_lcore 
*fc, packet_fwd_t pkt_fwd) } } #endif -#ifdef RTE_LIBRTE_LATENCY_STATS +#ifdef RTE_LIB_LATENCYSTATS if (latencystats_enabled != 0 && latencystats_lcore_id == rte_lcore_id()) rte_latencystats_update(); @@ -2219,11 +2195,6 @@ start_packet_forwarding(int with_tx_first) rxtx_config_display(); fwd_stats_reset(); - for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { - pt_id = fwd_ports_ids[i]; - port = &ports[pt_id]; - map_port_queue_stats_mapping_registers(pt_id, port); - } if (with_tx_first) { port_fwd_begin = tx_only_engine.port_fwd_begin; if (port_fwd_begin != NULL) { @@ -2345,7 +2316,7 @@ port_is_started(portid_t port_id) /* Configure the Rx and Tx hairpin queues for the selected port. */ static int -setup_hairpin_queues(portid_t pi) +setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi) { queueid_t qi; struct rte_eth_hairpin_conf hairpin_conf = { @@ -2354,10 +2325,49 @@ setup_hairpin_queues(portid_t pi) int i; int diag; struct rte_port *port = &ports[pi]; + uint16_t peer_rx_port = pi; + uint16_t peer_tx_port = pi; + uint32_t manual = 1; + uint32_t tx_exp = hairpin_mode & 0x10; + + if (!(hairpin_mode & 0xf)) { + peer_rx_port = pi; + peer_tx_port = pi; + manual = 0; + } else if (hairpin_mode & 0x1) { + peer_tx_port = rte_eth_find_next_owned_by(pi + 1, + RTE_ETH_DEV_NO_OWNER); + if (peer_tx_port >= RTE_MAX_ETHPORTS) + peer_tx_port = rte_eth_find_next_owned_by(0, + RTE_ETH_DEV_NO_OWNER); + if (p_pi != RTE_MAX_ETHPORTS) { + peer_rx_port = p_pi; + } else { + uint16_t next_pi; + + /* Last port will be the peer RX port of the first. */ + RTE_ETH_FOREACH_DEV(next_pi) + peer_rx_port = next_pi; + } + manual = 1; + } else if (hairpin_mode & 0x2) { + if (cnt_pi & 0x1) { + peer_rx_port = p_pi; + } else { + peer_rx_port = rte_eth_find_next_owned_by(pi + 1, + RTE_ETH_DEV_NO_OWNER); + if (peer_rx_port >= RTE_MAX_ETHPORTS) + peer_rx_port = pi; + } + peer_tx_port = peer_rx_port; + manual = 1; + } for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) { - hairpin_conf.peers[0].port = pi; + hairpin_conf.peers[0].port = peer_rx_port; hairpin_conf.peers[0].queue = i + nb_rxq; + hairpin_conf.manual_bind = !!manual; + hairpin_conf.tx_explicit = !!tx_exp; diag = rte_eth_tx_hairpin_queue_setup (pi, qi, nb_txd, &hairpin_conf); i++; @@ -2377,8 +2387,10 @@ setup_hairpin_queues(portid_t pi) return -1; } for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) { - hairpin_conf.peers[0].port = pi; + hairpin_conf.peers[0].port = peer_tx_port; hairpin_conf.peers[0].queue = i + nb_txq; + hairpin_conf.manual_bind = !!manual; + hairpin_conf.tx_explicit = !!tx_exp; diag = rte_eth_rx_hairpin_queue_setup (pi, qi, nb_rxd, &hairpin_conf); i++; @@ -2400,11 +2412,62 @@ setup_hairpin_queues(portid_t pi) return 0; } +/* Configure the Rx with optional split. */ +int +rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id, + uint16_t nb_rx_desc, unsigned int socket_id, + struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp) +{ + union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {}; + unsigned int i, mp_n; + int ret; + + if (rx_pkt_nb_segs <= 1 || + (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) { + rx_conf->rx_seg = NULL; + rx_conf->rx_nseg = 0; + ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, + nb_rx_desc, socket_id, + rx_conf, mp); + return ret; + } + for (i = 0; i < rx_pkt_nb_segs; i++) { + struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split; + struct rte_mempool *mpx; + /* + * Use last valid pool for the segments with number + * exceeding the pool index. + */ + mp_n = (i > mbuf_data_size_n) ? 
mbuf_data_size_n - 1 : i; + mpx = mbuf_pool_find(socket_id, mp_n); + /* Handle zero as mbuf data buffer size. */ + rx_seg->length = rx_pkt_seg_lengths[i] ? + rx_pkt_seg_lengths[i] : + mbuf_data_size[mp_n]; + rx_seg->offset = i < rx_pkt_nb_offs ? + rx_pkt_seg_offsets[i] : 0; + rx_seg->mp = mpx ? mpx : mp; + } + rx_conf->rx_nseg = rx_pkt_nb_segs; + rx_conf->rx_seg = rx_useg; + ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc, + socket_id, rx_conf, NULL); + rx_conf->rx_seg = NULL; + rx_conf->rx_nseg = 0; + return ret; +} + int start_port(portid_t pid) { int diag, need_check_link_status = -1; portid_t pi; + portid_t p_pi = RTE_MAX_ETHPORTS; + portid_t pl[RTE_MAX_ETHPORTS]; + portid_t peer_pl[RTE_MAX_ETHPORTS]; + uint16_t cnt_pi = 0; + uint16_t cfg_pi = 0; + int peer_pi; queueid_t qi; struct rte_port *port; struct rte_ether_addr mac_addr; @@ -2498,7 +2561,8 @@ start_port(portid_t pid) if ((numa_support) && (rxring_numa[pi] != NUMA_NO_CONFIG)) { struct rte_mempool * mp = - mbuf_pool_find(rxring_numa[pi]); + mbuf_pool_find + (rxring_numa[pi], 0); if (mp == NULL) { printf("Failed to setup RX queue:" "No mempool allocation" @@ -2507,14 +2571,15 @@ start_port(portid_t pid) return -1; } - diag = rte_eth_rx_queue_setup(pi, qi, + diag = rx_queue_setup(pi, qi, port->nb_rx_desc[qi], rxring_numa[pi], &(port->rx_conf[qi]), mp); } else { struct rte_mempool *mp = - mbuf_pool_find(port->socket_id); + mbuf_pool_find + (port->socket_id, 0); if (mp == NULL) { printf("Failed to setup RX queue:" "No mempool allocation" @@ -2522,7 +2587,7 @@ start_port(portid_t pid) port->socket_id); return -1; } - diag = rte_eth_rx_queue_setup(pi, qi, + diag = rx_queue_setup(pi, qi, port->nb_rx_desc[qi], port->socket_id, &(port->rx_conf[qi]), @@ -2544,7 +2609,7 @@ start_port(portid_t pid) return -1; } /* setup hairpin queues */ - if (setup_hairpin_queues(pi) != 0) + if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0) return -1; } configure_rxtx_dump_callbacks(verbose_level); @@ -2557,6 +2622,9 @@ start_port(portid_t pid) pi); } + p_pi = pi; + cnt_pi++; + /* start port */ if (rte_eth_dev_start(pi) < 0) { printf("Fail to start port %d\n", pi); @@ -2581,6 +2649,8 @@ start_port(portid_t pid) /* at least one port started, need checking link status */ need_check_link_status = 1; + + pl[cfg_pi++] = pi; } if (need_check_link_status == 1 && !no_link_check) @@ -2588,6 +2658,50 @@ start_port(portid_t pid) else if (need_check_link_status == 0) printf("Please stop the ports first\n"); + if (hairpin_mode & 0xf) { + uint16_t i; + int j; + + /* bind all started hairpin ports */ + for (i = 0; i < cfg_pi; i++) { + pi = pl[i]; + /* bind current Tx to all peer Rx */ + peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, + RTE_MAX_ETHPORTS, 1); + if (peer_pi < 0) + return peer_pi; + for (j = 0; j < peer_pi; j++) { + if (!port_is_started(peer_pl[j])) + continue; + diag = rte_eth_hairpin_bind(pi, peer_pl[j]); + if (diag < 0) { + printf("Error during binding hairpin" + " Tx port %u to %u: %s\n", + pi, peer_pl[j], + rte_strerror(-diag)); + return -1; + } + } + /* bind all peer Tx to current Rx */ + peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, + RTE_MAX_ETHPORTS, 0); + if (peer_pi < 0) + return peer_pi; + for (j = 0; j < peer_pi; j++) { + if (!port_is_started(peer_pl[j])) + continue; + diag = rte_eth_hairpin_bind(peer_pl[j], pi); + if (diag < 0) { + printf("Error during binding hairpin" + " Tx port %u to %u: %s\n", + peer_pl[j], pi, + rte_strerror(-diag)); + return -1; + } + } + } + } + printf("Done\n"); return 0; } @@ -2598,6 +2712,8 
@@ stop_port(portid_t pid) portid_t pi; struct rte_port *port; int need_check_link_status = 0; + portid_t peer_pl[RTE_MAX_ETHPORTS]; + int peer_pi; if (dcb_test) { dcb_test = 0; @@ -2628,7 +2744,28 @@ stop_port(portid_t pid) RTE_PORT_HANDLING) == 0) continue; - rte_eth_dev_stop(pi); + if (hairpin_mode & 0xf) { + int j; + + rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS); + /* unbind all peer Tx from current Rx */ + peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl, + RTE_MAX_ETHPORTS, 0); + if (peer_pi < 0) + continue; + for (j = 0; j < peer_pi; j++) { + if (!port_is_started(peer_pl[j])) + continue; + rte_eth_hairpin_unbind(peer_pl[j], pi); + } + } + + if (port->flow_list) + port_flow_flush(pi); + + if (rte_eth_dev_stop(pi) != 0) + RTE_LOG(ERR, EAL, "rte_eth_dev_stop failed for port %u\n", + pi); if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0) @@ -2909,13 +3046,13 @@ void pmd_test_exit(void) { portid_t pt_id; + unsigned int i; int ret; - int i; if (test_done == 0) stop_packet_forwarding(); - for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + for (i = 0 ; i < RTE_DIM(mempools) ; i++) { if (mempools[i]) { if (mp_alloc_type == MP_ALLOC_ANON) rte_mempool_mem_iter(mempools[i], dma_unmap_cb, @@ -2959,7 +3096,7 @@ pmd_test_exit(void) return; } } - for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + for (i = 0 ; i < RTE_DIM(mempools) ; i++) { if (mempools[i]) rte_mempool_free(mempools[i]); } @@ -3172,84 +3309,6 @@ dev_event_callback(const char *device_name, enum rte_dev_event_type type, } } -static int -set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) -{ - uint16_t i; - int diag; - uint8_t mapping_found = 0; - - for (i = 0; i < nb_tx_queue_stats_mappings; i++) { - if ((tx_queue_stats_mappings[i].port_id == port_id) && - (tx_queue_stats_mappings[i].queue_id < nb_txq )) { - diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id, - tx_queue_stats_mappings[i].queue_id, - tx_queue_stats_mappings[i].stats_counter_id); - if (diag != 0) - return diag; - mapping_found = 1; - } - } - if (mapping_found) - port->tx_queue_stats_mapping_enabled = 1; - return 0; -} - -static int -set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port) -{ - uint16_t i; - int diag; - uint8_t mapping_found = 0; - - for (i = 0; i < nb_rx_queue_stats_mappings; i++) { - if ((rx_queue_stats_mappings[i].port_id == port_id) && - (rx_queue_stats_mappings[i].queue_id < nb_rxq )) { - diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id, - rx_queue_stats_mappings[i].queue_id, - rx_queue_stats_mappings[i].stats_counter_id); - if (diag != 0) - return diag; - mapping_found = 1; - } - } - if (mapping_found) - port->rx_queue_stats_mapping_enabled = 1; - return 0; -} - -static void -map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port) -{ - int diag = 0; - - diag = set_tx_queue_stats_mapping_registers(pi, port); - if (diag != 0) { - if (diag == -ENOTSUP) { - port->tx_queue_stats_mapping_enabled = 0; - printf("TX queue stats mapping not supported port id=%d\n", pi); - } - else - rte_exit(EXIT_FAILURE, - "set_tx_queue_stats_mapping_registers " - "failed for port id=%d diag=%d\n", - pi, diag); - } - - diag = set_rx_queue_stats_mapping_registers(pi, port); - if (diag != 0) { - if (diag == -ENOTSUP) { - port->rx_queue_stats_mapping_enabled = 0; - printf("RX queue stats mapping not supported port id=%d\n", pi); - } - else - rte_exit(EXIT_FAILURE, - "set_rx_queue_stats_mapping_registers " - "failed for port id=%d diag=%d\n", - pi, diag); - } -} - 
static void rxtx_port_config(struct rte_port *port) { @@ -3307,6 +3366,80 @@ rxtx_port_config(struct rte_port *port) } } +/* + * Helper function to arrange max_rx_pktlen value and JUMBO_FRAME offload, + * MTU is also aligned if JUMBO_FRAME offload is not set. + * + * port->dev_info should be set before calling this function. + * + * return 0 on success, negative on error + */ +int +update_jumbo_frame_offload(portid_t portid) +{ + struct rte_port *port = &ports[portid]; + uint32_t eth_overhead; + uint64_t rx_offloads; + int ret; + bool on; + + /* Update the max_rx_pkt_len to have MTU as RTE_ETHER_MTU */ + if (port->dev_info.max_mtu != UINT16_MAX && + port->dev_info.max_rx_pktlen > port->dev_info.max_mtu) + eth_overhead = port->dev_info.max_rx_pktlen - + port->dev_info.max_mtu; + else + eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN; + + rx_offloads = port->dev_conf.rxmode.offloads; + + /* Default config value is 0 to use PMD specific overhead */ + if (port->dev_conf.rxmode.max_rx_pkt_len == 0) + port->dev_conf.rxmode.max_rx_pkt_len = RTE_ETHER_MTU + eth_overhead; + + if (port->dev_conf.rxmode.max_rx_pkt_len <= RTE_ETHER_MTU + eth_overhead) { + rx_offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + on = false; + } else { + if ((port->dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { + printf("Frame size (%u) is not supported by port %u\n", + port->dev_conf.rxmode.max_rx_pkt_len, + portid); + return -1; + } + rx_offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + on = true; + } + + if (rx_offloads != port->dev_conf.rxmode.offloads) { + uint16_t qid; + + port->dev_conf.rxmode.offloads = rx_offloads; + + /* Apply JUMBO_FRAME offload configuration to Rx queue(s) */ + for (qid = 0; qid < port->dev_info.nb_rx_queues; qid++) { + if (on) + port->rx_conf[qid].offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + else + port->rx_conf[qid].offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME; + } + } + + /* If JUMBO_FRAME is set MTU conversion done by ethdev layer, + * if unset do it here + */ + if ((rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) { + ret = rte_eth_dev_set_mtu(portid, + port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead); + if (ret) + printf("Failed to set MTU to %u for port %u\n", + port->dev_conf.rxmode.max_rx_pkt_len - eth_overhead, + portid); + } + + return 0; +} + void init_port_config(void) { @@ -3346,8 +3479,7 @@ init_port_config(void) if (ret != 0) return; - map_port_queue_stats_mapping_registers(pid, port); -#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS +#if defined RTE_NET_IXGBE && defined RTE_LIBRTE_IXGBE_BYPASS rte_pmd_ixgbe_bypass_init(pid); #endif @@ -3557,8 +3689,6 @@ init_port_dcb_config(portid_t pid, if (retval != 0) return retval; - map_port_queue_stats_mapping_registers(pid, rte_port); - rte_port->dcb_flag = 1; return 0; @@ -3567,6 +3697,8 @@ init_port_dcb_config(portid_t pid, static void init_port(void) { + int i; + /* Configuration of Ethernet ports. 
*/ ports = rte_zmalloc("testpmd: ports", sizeof(struct rte_port) * RTE_MAX_ETHPORTS, @@ -3576,7 +3708,8 @@ init_port(void) "rte_zmalloc(%d struct rte_port) failed\n", RTE_MAX_ETHPORTS); } - + for (i = 0; i < RTE_MAX_ETHPORTS; i++) + LIST_INIT(&ports[i].flow_tunnel_list); /* Initialize ports NUMA structures */ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); @@ -3613,11 +3746,11 @@ signal_handler(int signum) if (signum == SIGINT || signum == SIGTERM) { printf("\nSignal %d received, preparing to exit...\n", signum); -#ifdef RTE_LIBRTE_PDUMP +#ifdef RTE_LIB_PDUMP /* uninitialize packet capture framework */ rte_pdump_uninit(); #endif -#ifdef RTE_LIBRTE_LATENCY_STATS +#ifdef RTE_LIB_LATENCYSTATS if (latencystats_enabled != 0) rte_latencystats_uninit(); #endif @@ -3659,7 +3792,7 @@ main(int argc, char** argv) if (ret != 0) rte_exit(EXIT_FAILURE, "Cannot register for ethdev events"); -#ifdef RTE_LIBRTE_PDUMP +#ifdef RTE_LIB_PDUMP /* initialize packet capture framework */ rte_pdump_init(); #endif @@ -3682,10 +3815,10 @@ main(int argc, char** argv) "Check the core mask argument\n"); /* Bitrate/latency stats disabled by default */ -#ifdef RTE_LIBRTE_BITRATESTATS +#ifdef RTE_LIB_BITRATESTATS bitrate_enabled = 0; #endif -#ifdef RTE_LIBRTE_LATENCY_STATS +#ifdef RTE_LIB_LATENCYSTATS latencystats_enabled = 0; #endif @@ -3764,7 +3897,7 @@ main(int argc, char** argv) /* Init metrics library */ rte_metrics_init(rte_socket_id()); -#ifdef RTE_LIBRTE_LATENCY_STATS +#ifdef RTE_LIB_LATENCYSTATS if (latencystats_enabled != 0) { int ret = rte_latencystats_init(1, NULL); if (ret) @@ -3776,7 +3909,7 @@ main(int argc, char** argv) #endif /* Setup bitrate stats */ -#ifdef RTE_LIBRTE_BITRATESTATS +#ifdef RTE_LIB_BITRATESTATS if (bitrate_enabled != 0) { bitrate_data = rte_stats_bitrate_create(); if (bitrate_data == NULL) @@ -3786,7 +3919,7 @@ main(int argc, char** argv) } #endif -#ifdef RTE_LIBRTE_CMDLINE +#ifdef RTE_LIB_CMDLINE if (strlen(cmdline_filename) != 0) cmdline_read_from_file(cmdline_filename);
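The rx_queue_setup() wrapper added above is the integration point for the new buffer-split API: when several segments are configured and RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT is set, the mempool argument is dropped and a per-segment description is passed through rx_conf instead. A minimal sketch of the same call sequence from an application's point of view, assuming a two-segment layout (128-byte headers, then payload) and two pre-created pools; the sizes and pool roles are illustrative, not values from this patch:

#include <string.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Split every received packet: first 128 bytes into hdr_pool, the
 * remainder into pay_pool. Assumes the port advertises the
 * RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT capability. */
static int
setup_split_rxq(uint16_t port, uint16_t queue, uint16_t nb_desc,
		struct rte_mempool *hdr_pool, struct rte_mempool *pay_pool)
{
	union rte_eth_rxseg segs[2];
	struct rte_eth_dev_info info;
	struct rte_eth_rxconf rxconf;
	int ret;

	ret = rte_eth_dev_info_get(port, &info);
	if (ret != 0)
		return ret;
	rxconf = info.default_rxconf;
	rxconf.offloads |= RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT;

	memset(segs, 0, sizeof(segs));
	segs[0].split.mp = hdr_pool;
	segs[0].split.length = 128;	/* protocol headers */
	segs[1].split.mp = pay_pool;
	segs[1].split.length = 0;	/* 0: use the pool's full data room */

	rxconf.rx_seg = segs;
	rxconf.rx_nseg = 2;
	/* The mp argument must be NULL when rx_seg/rx_nseg are used. */
	return rte_eth_rx_queue_setup(port, queue, nb_desc,
				      rte_eth_dev_socket_id(port),
				      &rxconf, NULL);
}

testpmd itself reaches this path through rx_pkt_seg_lengths/rx_pkt_seg_offsets and falls back to the plain single-pool rte_eth_rx_queue_setup() call when only one segment is configured, as the wrapper above shows.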
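Pools are likewise now created per (socket, mbuf-size index): mempools[] grows to RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT entries and mbuf_pool_create()/mbuf_pool_find() gain a size_idx argument. A sketch of the lookup side, assuming the mb_pool naming scheme from testpmd.h (the idx-0 name keeps its legacy single-suffix form so pre-existing non-split paths still find their pool; verify the exact format against MBUF_POOL_NAME_PFX in the tree):

#include <stdio.h>
#include <rte_mempool.h>

/* Resolve the per-(socket, size-index) mbuf pool by rebuilding the
 * name testpmd gives it at creation time. */
static struct rte_mempool *
pool_lookup(unsigned int socket_id, uint16_t size_idx)
{
	char name[RTE_MEMPOOL_NAMESIZE];

	if (size_idx == 0)	/* legacy name kept for the first pool */
		snprintf(name, sizeof(name), "mb_pool_%u", socket_id);
	else
		snprintf(name, sizeof(name), "mb_pool_%u_%u",
			 socket_id, size_idx);
	return rte_mempool_lookup(name);
}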
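update_jumbo_frame_offload() infers the PMD's Ethernet overhead as max_rx_pktlen - max_mtu when the PMD reports a real max_mtu, falling back to RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN (18 bytes) otherwise; DEV_RX_OFFLOAD_JUMBO_FRAME is then enabled only when the requested frame exceeds RTE_ETHER_MTU plus that overhead, and the MTU is set explicitly when it is not. A worked example of the arithmetic with assumed PMD limits (9618/9600 are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Assumed PMD report: max_rx_pktlen 9618, max_mtu 9600. */
	uint32_t eth_overhead = 9618 - 9600;	/* 18 = Ether hdr + CRC */
	uint32_t mtu = 1500;			/* RTE_ETHER_MTU */
	uint32_t max_rx_pkt_len = 0;		/* testpmd's new default */

	if (max_rx_pkt_len == 0)		/* 0 means "plain MTU frame" */
		max_rx_pkt_len = mtu + eth_overhead;	/* 1518 */

	if (max_rx_pkt_len > mtu + eth_overhead)
		printf("enable JUMBO_FRAME; ethdev derives the MTU\n");
	else
		printf("no JUMBO_FRAME; set MTU to %u\n",
		       max_rx_pkt_len - eth_overhead);	/* back to 1500 */
	return 0;
}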
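The hairpin_mode bits consumed by setup_hairpin_queues() encode the peering topology: a zero low nibble keeps the old behaviour (each port hairpins to itself), 0x1 chains all ports into a ring, 0x2 binds them in pairs, and 0x10 additionally requests explicit Tx flow mode; any non-zero low nibble also switches the queues to manual binding, which start_port() then performs in both directions with rte_eth_hairpin_bind() and stop_port() reverses with rte_eth_hairpin_unbind(). A compact model of the peer selection only, assuming ports numbered 0..n-1 (illustrative; the real code walks RTE_ETH_FOREACH_DEV and honours port ownership):

#include <stdint.h>
#include <stdio.h>

/* Peer selection for the low nibble of hairpin_mode, assuming ports
 * are numbered 0..n-1. */
static void
hairpin_peers(uint16_t mode, uint16_t port, uint16_t n,
	      uint16_t *peer_tx, uint16_t *peer_rx)
{
	if ((mode & 0xf) == 0) {		/* default: port to itself */
		*peer_tx = *peer_rx = port;
	} else if (mode & 0x1) {		/* ring across all ports */
		*peer_tx = (port + 1) % n;	/* our Tx feeds the next port */
		*peer_rx = (port + n - 1) % n;	/* previous port feeds our Rx */
	} else {				/* 0x2: pairs (0,1), (2,3), ... */
		*peer_tx = *peer_rx = port ^ 1;
	}
}

int main(void)
{
	uint16_t tx, rx, p;

	for (p = 0; p < 4; p++) {
		hairpin_peers(0x1, p, 4, &tx, &rx);
		printf("ring: port %u -> Tx peer %u, Rx peer %u\n", p, tx, rx);
	}
	return 0;
}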