X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftestpmd.c;h=2c736ecac3b56b6721261f9867b2b29248875b6d;hb=6d13ea8e8e49ab957deae2bba5ecf4a4bfe747d1;hp=23352bed31e03001276f0e0252197f0e6e1684d4;hpb=5b590fbe09b6163a55f279bc5d4b85dce39f1d49;p=dpdk.git diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c index 23352bed31..2c736ecac3 100644 --- a/app/test-pmd/testpmd.c +++ b/app/test-pmd/testpmd.c @@ -1,34 +1,5 @@ -/*- - * BSD LICENSE - * - * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2010-2017 Intel Corporation */ #include @@ -41,6 +12,7 @@ #include #include #include +#include #include #include @@ -55,9 +27,9 @@ #include #include #include +#include #include #include -#include #include #include #include @@ -68,6 +40,7 @@ #include #include #include +#include #include #include #include @@ -91,7 +64,24 @@ #include "testpmd.h" +#ifndef MAP_HUGETLB +/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */ +#define HUGE_FLAG (0x40000) +#else +#define HUGE_FLAG MAP_HUGETLB +#endif + +#ifndef MAP_HUGE_SHIFT +/* older kernels (or FreeBSD) will not have this define */ +#define HUGE_SHIFT (26) +#else +#define HUGE_SHIFT MAP_HUGE_SHIFT +#endif + +#define EXTMEM_HEAP_NAME "extmem" + uint16_t verbose_level = 0; /**< Silent by default. */ +int testpmd_logtype; /**< Log type for testpmd logs */ /* use master core for command line ? */ uint8_t interactive = 0; @@ -115,9 +105,31 @@ uint8_t numa_support = 1; /**< numa enabled by default */ uint8_t socket_num = UMA_NO_CONFIG; /* - * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs. 
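
Aside on the HUGE_FLAG/HUGE_SHIFT fallbacks defined earlier in this hunk: mmap(2) encodes an explicit hugepage size as log2(page_size) << MAP_HUGE_SHIFT, which is what pagesz_flags() further down computes via rte_log2_u64(). A minimal standalone sketch of the resulting call; map_anon_huge() is a hypothetical helper, not testpmd code:

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000		/* same fallback as HUGE_FLAG */
#endif
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26		/* same fallback as HUGE_SHIFT */
#endif

static void *
map_anon_huge(size_t len, uint64_t page_sz)	/* page_sz: power of two */
{
	int log2 = 0;

	while ((UINT64_C(1) << log2) < page_sz)
		log2++;
	/* returns MAP_FAILED on error, like plain mmap() */
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB |
		    (log2 << MAP_HUGE_SHIFT), -1, 0);
}
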
+ * Select mempool allocation type: + * - native: use regular DPDK memory + * - anon: use regular DPDK memory to create mempool, but populate using + * anonymous memory (may not be IOVA-contiguous) + * - xmem: use externally allocated hugepage memory + */ +uint8_t mp_alloc_type = MP_ALLOC_NATIVE; + +/* + * Store specified sockets on which memory pool to be used by ports + * is allocated. + */ +uint8_t port_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which RX ring to be used by ports + * is allocated. */ -uint8_t mp_anon = 0; +uint8_t rxring_numa[RTE_MAX_ETHPORTS]; + +/* + * Store specified sockets on which TX ring to be used by ports + * is allocated. + */ +uint8_t txring_numa[RTE_MAX_ETHPORTS]; /* * Record the Ethernet address of peer target ports to which packets are @@ -125,7 +137,7 @@ uint8_t mp_anon = 0; * Must be instantiated with the ethernet addresses of peer traffic generator * ports. */ -struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; +struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS]; portid_t nb_peer_eth_addrs = 0; /* @@ -136,6 +148,8 @@ portid_t nb_ports; /**< Number of probed ethernet ports. */ struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */ lcoreid_t nb_lcores; /**< Number of probed logical cores. */ +portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */ + /* * Test Forwarding Configuration. * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores @@ -164,9 +178,9 @@ struct fwd_engine * fwd_engines[] = { &tx_only_engine, &csum_fwd_engine, &icmp_echo_engine, -#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED - &softnic_tm_engine, - &softnic_tm_bypass_engine, + &noisy_vnf_engine, +#if defined RTE_LIBRTE_PMD_SOFTNIC + &softnic_fwd_engine, #endif #ifdef RTE_LIBRTE_IEEE1588 &ieee1588_fwd_engine, @@ -174,6 +188,9 @@ struct fwd_engine * fwd_engines[] = { NULL, }; +struct rte_mempool *mempools[RTE_MAX_NUMA_NODES]; +uint16_t mempool_flags; + struct fwd_config cur_fwd_config; struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */ uint32_t retry_enabled; @@ -203,6 +220,9 @@ uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */ enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF; /**< Split policy for packets to TX. */ +uint8_t txonly_multi_flow; +/**< Whether multiple flows are generated in TXONLY mode. */ + uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */ uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */ @@ -220,9 +240,10 @@ queueid_t nb_txq = 1; /**< Number of TX queues per port. */ /* * Configurable number of RX/TX ring descriptors. + * Defaults are supplied by drivers via ethdev. */ -#define RTE_TEST_RX_DESC_DEFAULT 128 -#define RTE_TEST_TX_DESC_DEFAULT 512 +#define RTE_TEST_RX_DESC_DEFAULT 0 +#define RTE_TEST_TX_DESC_DEFAULT 0 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */ uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */ @@ -260,9 +281,38 @@ int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET; int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET; /* - * Configurable value of TX queue flags. + * Configurable value of buffered packets before sending. + */ +uint16_t noisy_tx_sw_bufsz; + +/* + * Configurable value of packet buffer timeout. 
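
The mp_alloc_type selector described at the top of this hunk maps to an enum declared in testpmd.h; the names below all appear later in this diff, but the exact declaration is an assumption:

enum {
	MP_ALLOC_NATIVE,	/* rte_pktmbuf_pool_create() on DPDK memory */
	MP_ALLOC_ANON,		/* empty pool, populated from anonymous mmap */
	MP_ALLOC_XMEM,		/* externally allocated memory, normal pages */
	MP_ALLOC_XMEM_HUGE	/* externally allocated memory, hugepages */
};
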
*/ -int32_t txq_flags = RTE_PMD_PARAM_UNSET; +uint16_t noisy_tx_sw_buf_flush_time; + +/* + * Configurable value for size of VNF internal memory area + * used for simulating noisy neighbour behaviour + */ +uint64_t noisy_lkup_mem_sz; + +/* + * Configurable value of number of random writes done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_writes; + +/* + * Configurable value of number of random reads done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_reads; + +/* + * Configurable value of number of random reads/writes done in + * VNF simulation memory area. + */ +uint64_t noisy_lkup_num_reads_writes; /* * Receive Side Scaling (RSS) configuration. @@ -299,6 +349,26 @@ uint8_t lsc_interrupt = 1; /* enabled by default */ */ uint8_t rmv_interrupt = 1; /* enabled by default */ +uint8_t hot_plug = 0; /**< hotplug disabled by default. */ + +/* After attach, port setup is called on event or by iterator */ +bool setup_on_probe_event = true; + +/* Pretty printing of ethdev events */ +static const char * const eth_event_desc[] = { + [RTE_ETH_EVENT_UNKNOWN] = "unknown", + [RTE_ETH_EVENT_INTR_LSC] = "link state change", + [RTE_ETH_EVENT_QUEUE_STATE] = "queue state", + [RTE_ETH_EVENT_INTR_RESET] = "reset", + [RTE_ETH_EVENT_VF_MBOX] = "VF mbox", + [RTE_ETH_EVENT_IPSEC] = "IPsec", + [RTE_ETH_EVENT_MACSEC] = "MACsec", + [RTE_ETH_EVENT_INTR_RMV] = "device removal", + [RTE_ETH_EVENT_NEW] = "device probed", + [RTE_ETH_EVENT_DESTROY] = "device released", + [RTE_ETH_EVENT_MAX] = NULL, +}; + /* * Display or mask ether events * Default to all events except VF_MBOX @@ -307,8 +377,13 @@ uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) | (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) | + (UINT32_C(1) << RTE_ETH_EVENT_IPSEC) | (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) | (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV); +/* + * Decide if all memory are locked for performance. + */ +int do_mlockall = 0; /* * NIC bypass mode configuration options. @@ -339,14 +414,10 @@ lcoreid_t latencystats_lcore_id = -1; */ struct rte_eth_rxmode rx_mode = { .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */ - .split_hdr_size = 0, - .header_split = 0, /**< Header Split disabled. */ - .hw_ip_checksum = 0, /**< IP checksum offload disabled. */ - .hw_vlan_filter = 1, /**< VLAN filtering enabled. */ - .hw_vlan_strip = 1, /**< VLAN strip enabled. */ - .hw_vlan_extend = 0, /**< Extended VLAN disabled. */ - .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */ - .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. 
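
The do_mlockall flag added above gates a standard mlockall(2) call (issued elsewhere in this file, presumably at startup) that pins current and future pages so the datapath never takes page faults; a minimal sketch, assuming the flag is consulted during init:

#include <sys/mman.h>

	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE) != 0)
		perror("mlockall failed");
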
*/ +}; + +struct rte_eth_txmode tx_mode = { + .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE, }; struct rte_fdir_conf fdir_conf = { @@ -354,7 +425,7 @@ struct rte_fdir_conf fdir_conf = { .pballoc = RTE_FDIR_PBALLOC_64K, .status = RTE_FDIR_REPORT_STATUS, .mask = { - .vlan_tci_mask = 0x0, + .vlan_tci_mask = 0xFFEF, .ipv4_mask = { .src_ip = 0xFFFFFFFF, .dst_ip = 0xFFFFFFFF, @@ -383,6 +454,11 @@ struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_a uint16_t nb_tx_queue_stats_mappings = 0; uint16_t nb_rx_queue_stats_mappings = 0; +/* + * Display zero values by default for xstats + */ +uint8_t xstats_hide_zero; + unsigned int num_sockets = 0; unsigned int socket_ids[RTE_MAX_NUMA_NODES]; @@ -396,12 +472,52 @@ uint8_t bitrate_enabled; struct gro_status gro_ports[RTE_MAX_ETHPORTS]; uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES; +struct vxlan_encap_conf vxlan_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .select_tos_ttl = 0, + .vni = "\x00\x00\x00", + .udp_src = 0, + .udp_dst = RTE_BE16(4789), + .ipv4_src = IPv4(127, 0, 0, 1), + .ipv4_dst = IPv4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .ip_tos = 0, + .ip_ttl = 255, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + +struct nvgre_encap_conf nvgre_encap_conf = { + .select_ipv4 = 1, + .select_vlan = 0, + .tni = "\x00\x00\x00", + .ipv4_src = IPv4(127, 0, 0, 1), + .ipv4_dst = IPv4(255, 255, 255, 255), + .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x00\x01", + .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00" + "\x00\x00\x00\x00\x00\x00\x11\x11", + .vlan_tci = 0, + .eth_src = "\x00\x00\x00\x00\x00\x00", + .eth_dst = "\xff\xff\xff\xff\xff\xff", +}; + /* Forward function declarations */ -static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port); +static void setup_attached_port(portid_t pi); +static void map_port_queue_stats_mapping_registers(portid_t pi, + struct rte_port *port); static void check_all_ports_link_status(uint32_t port_mask); static int eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, void *ret_param); +static void dev_event_callback(const char *device_name, + enum rte_dev_event_type type, + void *param); /* * Check if all the ports are started. 
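
Note that vxlan_encap_conf and nvgre_encap_conf above keep multi-byte fields in network byte order (RTE_BE16(4789) is the IANA-assigned VXLAN UDP port) and the 24-bit VNI/TNI as three raw bytes. A sketch of reading them back in host order; the big-endian byte layout of vni[] is an assumption:

#include <rte_byteorder.h>

	uint16_t dport = rte_be_to_cpu_16(vxlan_encap_conf.udp_dst); /* 4789 */
	uint32_t vni = ((uint32_t)vxlan_encap_conf.vni[0] << 16) |
		       ((uint32_t)vxlan_encap_conf.vni[1] << 8) |
		       vxlan_encap_conf.vni[2];
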
@@ -440,6 +556,8 @@ set_default_fwd_lcores_config(void) nb_lc = 0; for (i = 0; i < RTE_MAX_LCORE; i++) { + if (!rte_lcore_is_enabled(i)) + continue; sock_num = rte_lcore_to_socket_id(i); if (new_socket_id(sock_num)) { if (num_sockets >= RTE_MAX_NUMA_NODES) { @@ -449,8 +567,6 @@ set_default_fwd_lcores_config(void) } socket_ids[num_sockets++] = sock_num; } - if (!rte_lcore_is_enabled(i)) - continue; if (i == rte_get_master_lcore()) continue; fwd_lcores_cpuids[nb_lc++] = i; @@ -477,9 +593,21 @@ set_default_fwd_ports_config(void) portid_t pt_id; int i = 0; - RTE_ETH_FOREACH_DEV(pt_id) + RTE_ETH_FOREACH_DEV(pt_id) { fwd_ports_ids[i++] = pt_id; + /* Update sockets info according to the attached device */ + int socket_id = rte_eth_dev_socket_id(pt_id); + if (socket_id >= 0 && new_socket_id(socket_id)) { + if (num_sockets >= RTE_MAX_NUMA_NODES) { + rte_exit(EXIT_FAILURE, + "Total sockets greater than %u\n", + RTE_MAX_NUMA_NODES); + } + socket_ids[num_sockets++] = socket_id; + } + } + nb_cfg_ports = nb_ports; nb_fwd_ports = nb_ports; } @@ -492,10 +620,281 @@ set_def_fwd_config(void) set_default_fwd_ports_config(); } +/* extremely pessimistic estimation of memory required to create a mempool */ +static int +calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out) +{ + unsigned int n_pages, mbuf_per_pg, leftover; + uint64_t total_mem, mbuf_mem, obj_sz; + + /* there is no good way to predict how much space the mempool will + * occupy because it will allocate chunks on the fly, and some of those + * will come from default DPDK memory while some will come from our + * external memory, so just assume 128MB will be enough for everyone. + */ + uint64_t hdr_mem = 128 << 20; + + /* account for possible non-contiguousness */ + obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL); + if (obj_sz > pgsz) { + TESTPMD_LOG(ERR, "Object size is bigger than page size\n"); + return -1; + } + + mbuf_per_pg = pgsz / obj_sz; + leftover = (nb_mbufs % mbuf_per_pg) > 0; + n_pages = (nb_mbufs / mbuf_per_pg) + leftover; + + mbuf_mem = n_pages * pgsz; + + total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz); + + if (total_mem > SIZE_MAX) { + TESTPMD_LOG(ERR, "Memory size too big\n"); + return -1; + } + *out = (size_t)total_mem; + + return 0; +} + +static int +pagesz_flags(uint64_t page_sz) +{ + /* as per mmap() manpage, all page sizes are log2 of page size + * shifted by MAP_HUGE_SHIFT + */ + int log2 = rte_log2_u64(page_sz); + + return (log2 << HUGE_SHIFT); +} + +static void * +alloc_mem(size_t memsz, size_t pgsz, bool huge) +{ + void *addr; + int flags; + + /* allocate anonymous hugepages */ + flags = MAP_ANONYMOUS | MAP_PRIVATE; + if (huge) + flags |= HUGE_FLAG | pagesz_flags(pgsz); + + addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0); + if (addr == MAP_FAILED) + return NULL; + + return addr; +} + +struct extmem_param { + void *addr; + size_t len; + size_t pgsz; + rte_iova_t *iova_table; + unsigned int iova_table_len; +}; + +static int +create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param, + bool huge) +{ + uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */ + RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */ + unsigned int cur_page, n_pages, pgsz_idx; + size_t mem_sz, cur_pgsz; + rte_iova_t *iovas = NULL; + void *addr; + int ret; + + for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) { + /* skip anything that is too big */ + if (pgsizes[pgsz_idx] > SIZE_MAX) + continue; + + cur_pgsz = pgsizes[pgsz_idx]; + + /* if we were told not to allocate 
hugepages, override */ + if (!huge) + cur_pgsz = sysconf(_SC_PAGESIZE); + + ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot calculate memory size\n"); + return -1; + } + + /* allocate our memory */ + addr = alloc_mem(mem_sz, cur_pgsz, huge); + + /* if we couldn't allocate memory with a specified page size, + * that doesn't mean we can't do it with other page sizes, so + * try another one. + */ + if (addr == NULL) + continue; + + /* store IOVA addresses for every page in this memory area */ + n_pages = mem_sz / cur_pgsz; + + iovas = malloc(sizeof(*iovas) * n_pages); + + if (iovas == NULL) { + TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n"); + goto fail; + } + /* lock memory if it's not huge pages */ + if (!huge) + mlock(addr, mem_sz); + + /* populate IOVA addresses */ + for (cur_page = 0; cur_page < n_pages; cur_page++) { + rte_iova_t iova; + size_t offset; + void *cur; + + offset = cur_pgsz * cur_page; + cur = RTE_PTR_ADD(addr, offset); + + /* touch the page before getting its IOVA */ + *(volatile char *)cur = 0; + + iova = rte_mem_virt2iova(cur); + + iovas[cur_page] = iova; + } + + break; + } + /* if we couldn't allocate anything */ + if (iovas == NULL) + return -1; + + param->addr = addr; + param->len = mem_sz; + param->pgsz = cur_pgsz; + param->iova_table = iovas; + param->iova_table_len = n_pages; + + return 0; +fail: + if (iovas) + free(iovas); + if (addr) + munmap(addr, mem_sz); + + return -1; +} + +static int +setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge) +{ + struct extmem_param param; + int socket_id, ret; + + memset(¶m, 0, sizeof(param)); + + /* check if our heap exists */ + socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); + if (socket_id < 0) { + /* create our heap */ + ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot create heap\n"); + return -1; + } + } + + ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge); + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot create memory area\n"); + return -1; + } + + /* we now have a valid memory area, so add it to heap */ + ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME, + param.addr, param.len, param.iova_table, + param.iova_table_len, param.pgsz); + + /* when using VFIO, memory is automatically mapped for DMA by EAL */ + + /* not needed any more */ + free(param.iova_table); + + if (ret < 0) { + TESTPMD_LOG(ERR, "Cannot add memory to heap\n"); + munmap(param.addr, param.len); + return -1; + } + + /* success */ + + TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n", + param.len >> 20); + + return 0; +} +static void +dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, + struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) +{ + uint16_t pid = 0; + int ret; + + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_dev *dev = + &rte_eth_devices[pid]; + + ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0, + memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to DMA unmap addr 0x%p " + "for device %s\n", + memhdr->addr, dev->data->name); + } + } + ret = rte_extmem_unregister(memhdr->addr, memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to un-register addr 0x%p\n", memhdr->addr); + } +} + +static void +dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused, + struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused) +{ + uint16_t pid = 0; + size_t page_size = sysconf(_SC_PAGESIZE); + int ret; + + ret = rte_extmem_register(memhdr->addr, 
memhdr->len, NULL, 0, + page_size); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to register addr 0x%p\n", memhdr->addr); + return; + } + RTE_ETH_FOREACH_DEV(pid) { + struct rte_eth_dev *dev = + &rte_eth_devices[pid]; + + ret = rte_dev_dma_map(dev->device, memhdr->addr, 0, + memhdr->len); + if (ret) { + TESTPMD_LOG(DEBUG, + "unable to DMA map addr 0x%p " + "for device %s\n", + memhdr->addr, dev->data->name); + } + } +} + /* * Configuration initialisation done once at init time. */ -static void +static struct rte_mempool * mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, unsigned int socket_id) { @@ -506,29 +905,64 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf, mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size; mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name)); - RTE_LOG(INFO, USER1, + TESTPMD_LOG(INFO, "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n", pool_name, nb_mbuf, mbuf_seg_size, socket_id); - if (mp_anon != 0) { - rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, - mb_size, (unsigned) mb_mempool_cache, - sizeof(struct rte_pktmbuf_pool_private), - socket_id, 0); - if (rte_mp == NULL) - goto err; - - if (rte_mempool_populate_anon(rte_mp) == 0) { - rte_mempool_free(rte_mp); - rte_mp = NULL; - goto err; - } - rte_pktmbuf_pool_init(rte_mp, NULL); - rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); - } else { - /* wrapper to rte_mempool_create() */ - rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, - mb_mempool_cache, 0, mbuf_seg_size, socket_id); + switch (mp_alloc_type) { + case MP_ALLOC_NATIVE: + { + /* wrapper to rte_mempool_create() */ + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); + rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, + mb_mempool_cache, 0, mbuf_seg_size, socket_id); + break; + } + case MP_ALLOC_ANON: + { + rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf, + mb_size, (unsigned int) mb_mempool_cache, + sizeof(struct rte_pktmbuf_pool_private), + socket_id, mempool_flags); + if (rte_mp == NULL) + goto err; + + if (rte_mempool_populate_anon(rte_mp) == 0) { + rte_mempool_free(rte_mp); + rte_mp = NULL; + goto err; + } + rte_pktmbuf_pool_init(rte_mp, NULL); + rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL); + rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL); + break; + } + case MP_ALLOC_XMEM: + case MP_ALLOC_XMEM_HUGE: + { + int heap_socket; + bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE; + + if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0) + rte_exit(EXIT_FAILURE, "Could not create external memory\n"); + + heap_socket = + rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME); + if (heap_socket < 0) + rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n"); + + TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n", + rte_mbuf_best_mempool_ops()); + rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf, + mb_mempool_cache, 0, mbuf_seg_size, + heap_socket); + break; + } + default: + { + rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n"); + } } err: @@ -539,6 +973,7 @@ err: } else if (verbose_level > 0) { rte_mempool_dump(stdout, rte_mp); } + return rte_mp; } /* @@ -562,6 +997,98 @@ check_socket_id(const unsigned int socket_id) return 0; } +/* + * Get the allowed maximum number of RX queues. + * *pid return the port id which has minimal value of + * max_rx_queues in all ports. 
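
Once setup_extmem() has added the area to the named heap, that memory behaves like any other NUMA socket from the allocator's point of view, which is exactly how the MP_ALLOC_XMEM cases above reach it. Usage sketch; the "probe" tag, size, and alignment are arbitrary:

#include <rte_malloc.h>

	int ext_socket = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
	void *p = (ext_socket < 0) ? NULL :
		rte_malloc_socket("probe", 4096, 0, ext_socket);
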
+ */ +queueid_t +get_allowed_max_nb_rxq(portid_t *pid) +{ + queueid_t allowed_max_rxq = MAX_QUEUE_ID; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + rte_eth_dev_info_get(pi, &dev_info); + if (dev_info.max_rx_queues < allowed_max_rxq) { + allowed_max_rxq = dev_info.max_rx_queues; + *pid = pi; + } + } + return allowed_max_rxq; +} + +/* + * Check input rxq is valid or not. + * If input rxq is not greater than any of maximum number + * of RX queues of all ports, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_rxq(queueid_t rxq) +{ + queueid_t allowed_max_rxq; + portid_t pid = 0; + + allowed_max_rxq = get_allowed_max_nb_rxq(&pid); + if (rxq > allowed_max_rxq) { + printf("Fail: input rxq (%u) can't be greater " + "than max_rx_queues (%u) of port %u\n", + rxq, + allowed_max_rxq, + pid); + return -1; + } + return 0; +} + +/* + * Get the allowed maximum number of TX queues. + * *pid return the port id which has minimal value of + * max_tx_queues in all ports. + */ +queueid_t +get_allowed_max_nb_txq(portid_t *pid) +{ + queueid_t allowed_max_txq = MAX_QUEUE_ID; + portid_t pi; + struct rte_eth_dev_info dev_info; + + RTE_ETH_FOREACH_DEV(pi) { + rte_eth_dev_info_get(pi, &dev_info); + if (dev_info.max_tx_queues < allowed_max_txq) { + allowed_max_txq = dev_info.max_tx_queues; + *pid = pi; + } + } + return allowed_max_txq; +} + +/* + * Check input txq is valid or not. + * If input txq is not greater than any of maximum number + * of TX queues of all ports, it is valid. + * if valid, return 0, else return -1 + */ +int +check_nb_txq(queueid_t txq) +{ + queueid_t allowed_max_txq; + portid_t pid = 0; + + allowed_max_txq = get_allowed_max_nb_txq(&pid); + if (txq > allowed_max_txq) { + printf("Fail: input txq (%u) can't be greater " + "than max_tx_queues (%u) of port %u\n", + txq, + allowed_max_txq, + pid); + return -1; + } + return 0; +} + static void init_config(void) { @@ -573,15 +1100,10 @@ init_config(void) uint8_t port_per_socket[RTE_MAX_NUMA_NODES]; struct rte_gro_param gro_param; uint32_t gso_types; + int k; memset(port_per_socket,0,RTE_MAX_NUMA_NODES); - if (numa_support) { - memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); - memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); - memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS); - } - /* Configuration of logical cores. */ fwd_lcores = rte_zmalloc("testpmd: fwd_lcores", sizeof(struct fwd_lcore *) * nb_lcores, @@ -603,24 +1125,48 @@ init_config(void) RTE_ETH_FOREACH_DEV(pid) { port = &ports[pid]; + /* Apply default TxRx configuration for all ports */ + port->dev_conf.txmode = tx_mode; + port->dev_conf.rxmode = rx_mode; rte_eth_dev_info_get(pid, &port->dev_info); + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + port->dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MBUF_FAST_FREE; + if (!(port->dev_info.tx_offload_capa & + DEV_TX_OFFLOAD_MATCH_METADATA)) + port->dev_conf.txmode.offloads &= + ~DEV_TX_OFFLOAD_MATCH_METADATA; if (numa_support) { if (port_numa[pid] != NUMA_NO_CONFIG) port_per_socket[port_numa[pid]]++; else { uint32_t socket_id = rte_eth_dev_socket_id(pid); - /* if socket_id is invalid, set to 0 */ + /* + * if socket_id is invalid, + * set to the first available socket. 
+ */ if (check_socket_id(socket_id) < 0) - socket_id = 0; + socket_id = socket_ids[0]; port_per_socket[socket_id]++; } } + /* Apply Rx offloads configuration */ + for (k = 0; k < port->dev_info.max_rx_queues; k++) + port->rx_conf[k].offloads = + port->dev_conf.rxmode.offloads; + /* Apply Tx offloads configuration */ + for (k = 0; k < port->dev_info.max_tx_queues; k++) + port->tx_conf[k].offloads = + port->dev_conf.txmode.offloads; + /* set flag to initialize port/queue */ port->need_reconfig = 1; port->need_reconfig_queues = 1; + port->tx_metadata = 0; } /* @@ -645,20 +1191,24 @@ init_config(void) uint8_t i; for (i = 0; i < num_sockets; i++) - mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, - socket_ids[i]); + mempools[i] = mbuf_pool_create(mbuf_data_size, + nb_mbuf_per_pool, + socket_ids[i]); } else { if (socket_num == UMA_NO_CONFIG) - mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0); + mempools[0] = mbuf_pool_create(mbuf_data_size, + nb_mbuf_per_pool, 0); else - mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, - socket_num); + mempools[socket_num] = mbuf_pool_create + (mbuf_data_size, + nb_mbuf_per_pool, + socket_num); } init_port_config(); gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO | - DEV_TX_OFFLOAD_GRE_TNL_TSO; + DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO; /* * Records which Mbuf pool to use by each logical core, if needed. */ @@ -697,6 +1247,19 @@ init_config(void) "rte_gro_ctx_create() failed\n"); } } + +#if defined RTE_LIBRTE_PMD_SOFTNIC + if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) { + RTE_ETH_FOREACH_DEV(pid) { + port = &ports[pid]; + const char *driver = port->dev_info.driver_name; + + if (strcmp(driver, "net_softnic") == 0) + port->softport.fwd_lcore_arg = fwd_lcores; + } + } +#endif + } @@ -747,9 +1310,12 @@ init_fwd_streams(void) else { port->socket_id = rte_eth_dev_socket_id(pid); - /* if socket_id is invalid, set to 0 */ + /* + * if socket_id is invalid, + * set to the first available socket. 
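
The MBUF_FAST_FREE/MATCH_METADATA handling in init_config() above follows a general idiom: never request an offload the PMD does not advertise in its capability mask. A minimal sketch of the same idiom; usable_tx_offloads() is a hypothetical helper, not testpmd code:

#include <rte_ethdev.h>

static uint64_t
usable_tx_offloads(uint16_t port_id, uint64_t wanted)
{
	struct rte_eth_dev_info di;

	rte_eth_dev_info_get(port_id, &di);
	return wanted & di.tx_offload_capa;	/* drop unsupported bits */
}
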
+ */ if (check_socket_id(port->socket_id) < 0) - port->socket_id = 0; + port->socket_id = socket_ids[0]; } } else { @@ -782,18 +1348,23 @@ init_fwd_streams(void) /* init new */ nb_fwd_streams = nb_fwd_streams_new; - fwd_streams = rte_zmalloc("testpmd: fwd_streams", - sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE); - if (fwd_streams == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) " - "failed\n", nb_fwd_streams); + if (nb_fwd_streams) { + fwd_streams = rte_zmalloc("testpmd: fwd_streams", + sizeof(struct fwd_stream *) * nb_fwd_streams, + RTE_CACHE_LINE_SIZE); + if (fwd_streams == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc(%d" + " (struct fwd_stream *)) failed\n", + nb_fwd_streams); - for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { - fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream", - sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE); - if (fwd_streams[sm_id] == NULL) - rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)" - " failed\n"); + for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) { + fwd_streams[sm_id] = rte_zmalloc("testpmd:" + " struct fwd_stream", sizeof(struct fwd_stream), + RTE_CACHE_LINE_SIZE); + if (fwd_streams[sm_id] == NULL) + rte_exit(EXIT_FAILURE, "rte_zmalloc" + "(struct fwd_stream) failed\n"); + } } return 0; @@ -827,6 +1398,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) pktnb_stats[1] = pktnb_stats[0]; burst_stats[0] = nb_burst; pktnb_stats[0] = nb_pkt; + } else if (nb_burst > burst_stats[1]) { + burst_stats[1] = nb_burst; + pktnb_stats[1] = nb_pkt; } } if (total_burst == 0) @@ -855,115 +1429,256 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs) #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */ static void -fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats) +fwd_stream_stats_display(streamid_t stream_id) { - struct rte_port *port; - uint8_t i; + struct fwd_stream *fs; + static const char *fwd_top_stats_border = "-------"; + + fs = fwd_streams[stream_id]; + if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && + (fs->fwd_dropped == 0)) + return; + printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " + "TX Port=%2d/Queue=%2d %s\n", + fwd_top_stats_border, fs->rx_port, fs->rx_queue, + fs->tx_port, fs->tx_queue, fwd_top_stats_border); + printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64 + " TX-dropped: %-14"PRIu64, + fs->rx_packets, fs->tx_packets, fs->fwd_dropped); + /* if checksum mode */ + if (cur_fwd_eng == &csum_fwd_engine) { + printf(" RX- bad IP checksum: %-14"PRIu64 + " Rx- bad L4 checksum: %-14"PRIu64 + " Rx- bad outer L4 checksum: %-14"PRIu64"\n", + fs->rx_bad_ip_csum, fs->rx_bad_l4_csum, + fs->rx_bad_outer_l4_csum); + } else { + printf("\n"); + } + +#ifdef RTE_TEST_PMD_RECORD_BURST_STATS + pkt_burst_stats_display("RX", &fs->rx_burst_stats); + pkt_burst_stats_display("TX", &fs->tx_burst_stats); +#endif +} + +void +fwd_stats_display(void) +{ static const char *fwd_stats_border = "----------------------"; + static const char *acc_stats_border = "+++++++++++++++"; + struct { + struct fwd_stream *rx_stream; + struct fwd_stream *tx_stream; + uint64_t tx_dropped; + uint64_t rx_bad_ip_csum; + uint64_t rx_bad_l4_csum; + uint64_t rx_bad_outer_l4_csum; + } ports_stats[RTE_MAX_ETHPORTS]; + uint64_t total_rx_dropped = 0; + uint64_t total_tx_dropped = 0; + uint64_t total_rx_nombuf = 0; + struct rte_eth_stats stats; +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + uint64_t fwd_cycles = 0; +#endif + uint64_t total_recv = 0; + uint64_t total_xmit 
= 0; + struct rte_port *port; + streamid_t sm_id; + portid_t pt_id; + int i; - port = &ports[port_id]; - printf("\n %s Forward statistics for port %-2d %s\n", - fwd_stats_border, port_id, fwd_stats_border); + memset(ports_stats, 0, sizeof(ports_stats)); - if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) { - printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " - "%-"PRIu64"\n", - stats->ipackets, stats->imissed, - (uint64_t) (stats->ipackets + stats->imissed)); + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs = fwd_streams[sm_id]; - if (cur_fwd_eng == &csum_fwd_engine) - printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n", - port->rx_bad_ip_csum, port->rx_bad_l4_csum); - if ((stats->ierrors + stats->rx_nombuf) > 0) { - printf(" RX-error: %-"PRIu64"\n", stats->ierrors); - printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf); + if (cur_fwd_config.nb_fwd_streams > + cur_fwd_config.nb_fwd_ports) { + fwd_stream_stats_display(sm_id); + } else { + ports_stats[fs->tx_port].tx_stream = fs; + ports_stats[fs->rx_port].rx_stream = fs; } - printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " - "%-"PRIu64"\n", - stats->opackets, port->tx_dropped, - (uint64_t) (stats->opackets + port->tx_dropped)); - } - else { - printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:" - "%14"PRIu64"\n", - stats->ipackets, stats->imissed, - (uint64_t) (stats->ipackets + stats->imissed)); + ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped; - if (cur_fwd_eng == &csum_fwd_engine) - printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n", - port->rx_bad_ip_csum, port->rx_bad_l4_csum); - if ((stats->ierrors + stats->rx_nombuf) > 0) { - printf(" RX-error:%"PRIu64"\n", stats->ierrors); - printf(" RX-nombufs: %14"PRIu64"\n", - stats->rx_nombuf); - } + ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum; + ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum; + ports_stats[fs->rx_port].rx_bad_outer_l4_csum += + fs->rx_bad_outer_l4_csum; - printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:" - "%14"PRIu64"\n", - stats->opackets, port->tx_dropped, - (uint64_t) (stats->opackets + port->tx_dropped)); +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fwd_cycles += fs->core_cycles; +#endif } + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + uint8_t j; + + pt_id = fwd_ports_ids[i]; + port = &ports[pt_id]; + + rte_eth_stats_get(pt_id, &stats); + stats.ipackets -= port->stats.ipackets; + stats.opackets -= port->stats.opackets; + stats.ibytes -= port->stats.ibytes; + stats.obytes -= port->stats.obytes; + stats.imissed -= port->stats.imissed; + stats.oerrors -= port->stats.oerrors; + stats.rx_nombuf -= port->stats.rx_nombuf; + + total_recv += stats.ipackets; + total_xmit += stats.opackets; + total_rx_dropped += stats.imissed; + total_tx_dropped += ports_stats[pt_id].tx_dropped; + total_tx_dropped += stats.oerrors; + total_rx_nombuf += stats.rx_nombuf; + + printf("\n %s Forward statistics for port %-2d %s\n", + fwd_stats_border, pt_id, fwd_stats_border); + + if (!port->rx_queue_stats_mapping_enabled && + !port->tx_queue_stats_mapping_enabled) { + printf(" RX-packets: %-14"PRIu64 + " RX-dropped: %-14"PRIu64 + "RX-total: %-"PRIu64"\n", + stats.ipackets, stats.imissed, + stats.ipackets + stats.imissed); + + if (cur_fwd_eng == &csum_fwd_engine) + printf(" Bad-ipcsum: %-14"PRIu64 + " Bad-l4csum: %-14"PRIu64 + "Bad-outer-l4csum: %-14"PRIu64"\n", + 
ports_stats[pt_id].rx_bad_ip_csum, + ports_stats[pt_id].rx_bad_l4_csum, + ports_stats[pt_id].rx_bad_outer_l4_csum); + if (stats.ierrors + stats.rx_nombuf > 0) { + printf(" RX-error: %-"PRIu64"\n", + stats.ierrors); + printf(" RX-nombufs: %-14"PRIu64"\n", + stats.rx_nombuf); + } + + printf(" TX-packets: %-14"PRIu64 + " TX-dropped: %-14"PRIu64 + "TX-total: %-"PRIu64"\n", + stats.opackets, ports_stats[pt_id].tx_dropped, + stats.opackets + ports_stats[pt_id].tx_dropped); + } else { + printf(" RX-packets: %14"PRIu64 + " RX-dropped:%14"PRIu64 + " RX-total:%14"PRIu64"\n", + stats.ipackets, stats.imissed, + stats.ipackets + stats.imissed); + + if (cur_fwd_eng == &csum_fwd_engine) + printf(" Bad-ipcsum:%14"PRIu64 + " Bad-l4csum:%14"PRIu64 + " Bad-outer-l4csum: %-14"PRIu64"\n", + ports_stats[pt_id].rx_bad_ip_csum, + ports_stats[pt_id].rx_bad_l4_csum, + ports_stats[pt_id].rx_bad_outer_l4_csum); + if ((stats.ierrors + stats.rx_nombuf) > 0) { + printf(" RX-error:%"PRIu64"\n", stats.ierrors); + printf(" RX-nombufs: %14"PRIu64"\n", + stats.rx_nombuf); + } + + printf(" TX-packets: %14"PRIu64 + " TX-dropped:%14"PRIu64 + " TX-total:%14"PRIu64"\n", + stats.opackets, ports_stats[pt_id].tx_dropped, + stats.opackets + ports_stats[pt_id].tx_dropped); + } #ifdef RTE_TEST_PMD_RECORD_BURST_STATS - if (port->rx_stream) - pkt_burst_stats_display("RX", - &port->rx_stream->rx_burst_stats); - if (port->tx_stream) - pkt_burst_stats_display("TX", - &port->tx_stream->tx_burst_stats); + if (ports_stats[pt_id].rx_stream) + pkt_burst_stats_display("RX", + &ports_stats[pt_id].rx_stream->rx_burst_stats); + if (ports_stats[pt_id].tx_stream) + pkt_burst_stats_display("TX", + &ports_stats[pt_id].tx_stream->tx_burst_stats); #endif - if (port->rx_queue_stats_mapping_enabled) { - printf("\n"); - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d RX-packets:%14"PRIu64 - " RX-errors:%14"PRIu64 - " RX-bytes:%14"PRIu64"\n", - i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]); + if (port->rx_queue_stats_mapping_enabled) { + printf("\n"); + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + printf(" Stats reg %2d RX-packets:%14"PRIu64 + " RX-errors:%14"PRIu64 + " RX-bytes:%14"PRIu64"\n", + j, stats.q_ipackets[j], + stats.q_errors[j], stats.q_ibytes[j]); + } + printf("\n"); } - printf("\n"); - } - if (port->tx_queue_stats_mapping_enabled) { - for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) { - printf(" Stats reg %2d TX-packets:%14"PRIu64 - " TX-bytes:%14"PRIu64"\n", - i, stats->q_opackets[i], stats->q_obytes[i]); + if (port->tx_queue_stats_mapping_enabled) { + for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { + printf(" Stats reg %2d TX-packets:%14"PRIu64 + " TX-bytes:%14" + PRIu64"\n", + j, stats.q_opackets[j], + stats.q_obytes[j]); + } } + + printf(" %s--------------------------------%s\n", + fwd_stats_border, fwd_stats_border); } - printf(" %s--------------------------------%s\n", - fwd_stats_border, fwd_stats_border); + printf("\n %s Accumulated forward statistics for all ports" + "%s\n", + acc_stats_border, acc_stats_border); + printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " + "%-"PRIu64"\n" + " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " + "%-"PRIu64"\n", + total_recv, total_rx_dropped, total_recv + total_rx_dropped, + total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); + if (total_rx_nombuf > 0) + printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); + printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" + "%s\n", + 
acc_stats_border, acc_stats_border); +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + if (total_recv > 0) + printf("\n CPU cycles/packet=%u (total cycles=" + "%"PRIu64" / total RX packets=%"PRIu64")\n", + (unsigned int)(fwd_cycles / total_recv), + fwd_cycles, total_recv); +#endif } -static void -fwd_stream_stats_display(streamid_t stream_id) +void +fwd_stats_reset(void) { - struct fwd_stream *fs; - static const char *fwd_top_stats_border = "-------"; - - fs = fwd_streams[stream_id]; - if ((fs->rx_packets == 0) && (fs->tx_packets == 0) && - (fs->fwd_dropped == 0)) - return; - printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> " - "TX Port=%2d/Queue=%2d %s\n", - fwd_top_stats_border, fs->rx_port, fs->rx_queue, - fs->tx_port, fs->tx_queue, fwd_top_stats_border); - printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u", - fs->rx_packets, fs->tx_packets, fs->fwd_dropped); + streamid_t sm_id; + portid_t pt_id; + int i; - /* if checksum mode */ - if (cur_fwd_eng == &csum_fwd_engine) { - printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: " - "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum); + for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { + pt_id = fwd_ports_ids[i]; + rte_eth_stats_get(pt_id, &ports[pt_id].stats); } + for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { + struct fwd_stream *fs = fwd_streams[sm_id]; + + fs->rx_packets = 0; + fs->tx_packets = 0; + fs->fwd_dropped = 0; + fs->rx_bad_ip_csum = 0; + fs->rx_bad_l4_csum = 0; + fs->rx_bad_outer_l4_csum = 0; #ifdef RTE_TEST_PMD_RECORD_BURST_STATS - pkt_burst_stats_display("RX", &fs->rx_burst_stats); - pkt_burst_stats_display("TX", &fs->tx_burst_stats); + memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats)); + memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats)); +#endif +#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES + fs->core_cycles = 0; #endif + } } static void @@ -1021,9 +1736,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) uint64_t tics_per_1sec; uint64_t tics_datum; uint64_t tics_current; - uint8_t idx_port, cnt_ports; + uint16_t i, cnt_ports; - cnt_ports = rte_eth_dev_count(); + cnt_ports = nb_ports; tics_datum = rte_rdtsc(); tics_per_1sec = rte_get_timer_hz(); #endif @@ -1038,11 +1753,9 @@ run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd) tics_current = rte_rdtsc(); if (tics_current - tics_datum >= tics_per_1sec) { /* Periodic bitrate calculation */ - for (idx_port = 0; - idx_port < cnt_ports; - idx_port++) + for (i = 0; i < cnt_ports; i++) rte_stats_bitrate_calc(bitrate_data, - idx_port); + ports_ids[i]); tics_datum = tics_current; } } @@ -1123,7 +1836,6 @@ start_packet_forwarding(int with_tx_first) struct rte_port *port; unsigned int i; portid_t pt_id; - streamid_t sm_id; if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq) rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n"); @@ -1147,10 +1859,6 @@ start_packet_forwarding(int with_tx_first) return; } - if (init_fwd_streams() < 0) { - printf("Fail from init_fwd_streams()\n"); - return; - } if(dcb_test) { for (i = 0; i < nb_fwd_ports; i++) { @@ -1170,38 +1878,20 @@ start_packet_forwarding(int with_tx_first) } test_done = 0; + fwd_config_setup(); + if(!no_flush_rx) flush_fwd_rx_queues(); - fwd_config_setup(); pkt_fwd_config_display(&cur_fwd_config); rxtx_config_display(); + fwd_stats_reset(); for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { pt_id = fwd_ports_ids[i]; port = &ports[pt_id]; - rte_eth_stats_get(pt_id, &port->stats); - port->tx_dropped = 0; - 
map_port_queue_stats_mapping_registers(pt_id, port); } - for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { - fwd_streams[sm_id]->rx_packets = 0; - fwd_streams[sm_id]->tx_packets = 0; - fwd_streams[sm_id]->fwd_dropped = 0; - fwd_streams[sm_id]->rx_bad_ip_csum = 0; - fwd_streams[sm_id]->rx_bad_l4_csum = 0; - -#ifdef RTE_TEST_PMD_RECORD_BURST_STATS - memset(&fwd_streams[sm_id]->rx_burst_stats, 0, - sizeof(fwd_streams[sm_id]->rx_burst_stats)); - memset(&fwd_streams[sm_id]->tx_burst_stats, 0, - sizeof(fwd_streams[sm_id]->tx_burst_stats)); -#endif -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - fwd_streams[sm_id]->core_cycles = 0; -#endif - } if (with_tx_first) { port_fwd_begin = tx_only_engine.port_fwd_begin; if (port_fwd_begin != NULL) { @@ -1225,26 +1915,10 @@ start_packet_forwarding(int with_tx_first) void stop_packet_forwarding(void) { - struct rte_eth_stats stats; - struct rte_port *port; - port_fwd_end_t port_fwd_end; + port_fwd_end_t port_fwd_end; + lcoreid_t lc_id; + portid_t pt_id; int i; - portid_t pt_id; - streamid_t sm_id; - lcoreid_t lc_id; - uint64_t total_recv; - uint64_t total_xmit; - uint64_t total_rx_dropped; - uint64_t total_tx_dropped; - uint64_t total_rx_nombuf; - uint64_t tx_dropped; - uint64_t rx_bad_ip_csum; - uint64_t rx_bad_l4_csum; -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - uint64_t fwd_cycles; -#endif - - static const char *acc_stats_border = "+++++++++++++++"; if (test_done) { printf("Packet forwarding not started\n"); @@ -1262,100 +1936,9 @@ stop_packet_forwarding(void) (*port_fwd_end)(pt_id); } } -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - fwd_cycles = 0; -#endif - for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) { - if (cur_fwd_config.nb_fwd_streams > - cur_fwd_config.nb_fwd_ports) { - fwd_stream_stats_display(sm_id); - ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL; - ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL; - } else { - ports[fwd_streams[sm_id]->tx_port].tx_stream = - fwd_streams[sm_id]; - ports[fwd_streams[sm_id]->rx_port].rx_stream = - fwd_streams[sm_id]; - } - tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped; - tx_dropped = (uint64_t) (tx_dropped + - fwd_streams[sm_id]->fwd_dropped); - ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped; - - rx_bad_ip_csum = - ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum; - rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum + - fwd_streams[sm_id]->rx_bad_ip_csum); - ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum = - rx_bad_ip_csum; - - rx_bad_l4_csum = - ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum; - rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum + - fwd_streams[sm_id]->rx_bad_l4_csum); - ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum = - rx_bad_l4_csum; - -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - fwd_cycles = (uint64_t) (fwd_cycles + - fwd_streams[sm_id]->core_cycles); -#endif - } - total_recv = 0; - total_xmit = 0; - total_rx_dropped = 0; - total_tx_dropped = 0; - total_rx_nombuf = 0; - for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) { - pt_id = fwd_ports_ids[i]; - - port = &ports[pt_id]; - rte_eth_stats_get(pt_id, &stats); - stats.ipackets -= port->stats.ipackets; - port->stats.ipackets = 0; - stats.opackets -= port->stats.opackets; - port->stats.opackets = 0; - stats.ibytes -= port->stats.ibytes; - port->stats.ibytes = 0; - stats.obytes -= port->stats.obytes; - port->stats.obytes = 0; - stats.imissed -= port->stats.imissed; - port->stats.imissed = 0; - stats.oerrors -= port->stats.oerrors; - port->stats.oerrors = 0; - stats.rx_nombuf -= 
port->stats.rx_nombuf; - port->stats.rx_nombuf = 0; - - total_recv += stats.ipackets; - total_xmit += stats.opackets; - total_rx_dropped += stats.imissed; - total_tx_dropped += port->tx_dropped; - total_rx_nombuf += stats.rx_nombuf; - fwd_port_stats_display(pt_id, &stats); - } + fwd_stats_display(); - printf("\n %s Accumulated forward statistics for all ports" - "%s\n", - acc_stats_border, acc_stats_border); - printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: " - "%-"PRIu64"\n" - " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: " - "%-"PRIu64"\n", - total_recv, total_rx_dropped, total_recv + total_rx_dropped, - total_xmit, total_tx_dropped, total_xmit + total_tx_dropped); - if (total_rx_nombuf > 0) - printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf); - printf(" %s++++++++++++++++++++++++++++++++++++++++++++++" - "%s\n", - acc_stats_border, acc_stats_border); -#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES - if (total_recv > 0) - printf("\n CPU cycles/packet=%u (total cycles=" - "%"PRIu64" / total RX packets=%"PRIu64")\n", - (unsigned int)(fwd_cycles / total_recv), - fwd_cycles, total_recv); -#endif printf("\nDone.\n"); test_done = 1; } @@ -1363,14 +1946,14 @@ stop_packet_forwarding(void) void dev_set_link_up(portid_t pid) { - if (rte_eth_dev_set_link_up((uint8_t)pid) < 0) + if (rte_eth_dev_set_link_up(pid) < 0) printf("\nSet link up fail.\n"); } void dev_set_link_down(portid_t pid) { - if (rte_eth_dev_set_link_down((uint8_t)pid) < 0) + if (rte_eth_dev_set_link_down(pid) < 0) printf("\nSet link down fail.\n"); } @@ -1392,16 +1975,24 @@ all_ports_started(void) return 1; } +int +port_is_stopped(portid_t port_id) +{ + struct rte_port *port = &ports[port_id]; + + if ((port->port_status != RTE_PORT_STOPPED) && + (port->slave_flag == 0)) + return 0; + return 1; +} + int all_ports_stopped(void) { portid_t pi; - struct rte_port *port; RTE_ETH_FOREACH_DEV(pi) { - port = &ports[pi]; - if ((port->port_status != RTE_PORT_STOPPED) && - (port->slave_flag == 0)) + if (!port_is_stopped(pi)) return 0; } @@ -1420,18 +2011,6 @@ port_is_started(portid_t port_id) return 1; } -static int -port_is_closed(portid_t port_id) -{ - if (port_id_is_invalid(port_id, ENABLED_WARN)) - return 0; - - if (ports[port_id].port_status != RTE_PORT_CLOSED) - return 0; - - return 1; -} - int start_port(portid_t pid) { @@ -1439,8 +2018,7 @@ start_port(portid_t pid) portid_t pi; queueid_t qi; struct rte_port *port; - struct ether_addr mac_addr; - enum rte_eth_event_type event_type; + struct rte_ether_addr mac_addr; if (port_id_is_invalid(pid, ENABLED_WARN)) return 0; @@ -1470,7 +2048,7 @@ start_port(portid_t pid) return -1; } } - + configure_rxtx_dump_callbacks(0); printf("Configuring Port %d (socket %u)\n", pi, port->socket_id); /* configure port */ @@ -1494,12 +2072,14 @@ start_port(portid_t pid) if ((numa_support) && (txring_numa[pi] != NUMA_NO_CONFIG)) diag = rte_eth_tx_queue_setup(pi, qi, - nb_txd,txring_numa[pi], - &(port->tx_conf)); + port->nb_tx_desc[qi], + txring_numa[pi], + &(port->tx_conf[qi])); else diag = rte_eth_tx_queue_setup(pi, qi, - nb_txd,port->socket_id, - &(port->tx_conf)); + port->nb_tx_desc[qi], + port->socket_id, + &(port->tx_conf[qi])); if (diag == 0) continue; @@ -1510,13 +2090,14 @@ start_port(portid_t pid) RTE_PORT_STOPPED) == 0) printf("Port %d can not be set back " "to stopped\n", pi); - printf("Fail to configure port %d tx queues\n", pi); + printf("Fail to configure port %d tx queues\n", + pi); /* try to reconfigure queues next time */ port->need_reconfig_queues = 1; return 
-1; } - /* setup rx queues */ for (qi = 0; qi < nb_rxq; qi++) { + /* setup rx queues */ if ((numa_support) && (rxring_numa[pi] != NUMA_NO_CONFIG)) { struct rte_mempool * mp = @@ -1530,8 +2111,10 @@ start_port(portid_t pid) } diag = rte_eth_rx_queue_setup(pi, qi, - nb_rxd,rxring_numa[pi], - &(port->rx_conf),mp); + port->nb_rx_desc[qi], + rxring_numa[pi], + &(port->rx_conf[qi]), + mp); } else { struct rte_mempool *mp = mbuf_pool_find(port->socket_id); @@ -1543,8 +2126,10 @@ start_port(portid_t pid) return -1; } diag = rte_eth_rx_queue_setup(pi, qi, - nb_rxd,port->socket_id, - &(port->rx_conf), mp); + port->nb_rx_desc[qi], + port->socket_id, + &(port->rx_conf[qi]), + mp); } if (diag == 0) continue; @@ -1555,27 +2140,14 @@ start_port(portid_t pid) RTE_PORT_STOPPED) == 0) printf("Port %d can not be set back " "to stopped\n", pi); - printf("Fail to configure port %d rx queues\n", pi); + printf("Fail to configure port %d rx queues\n", + pi); /* try to reconfigure queues next time */ port->need_reconfig_queues = 1; return -1; } } - - for (event_type = RTE_ETH_EVENT_UNKNOWN; - event_type < RTE_ETH_EVENT_MAX; - event_type++) { - diag = rte_eth_dev_callback_register(pi, - event_type, - eth_event_callback, - NULL); - if (diag) { - printf("Failed to setup even callback for event %d\n", - event_type); - return -1; - } - } - + configure_rxtx_dump_callbacks(verbose_level); /* start port */ if (rte_eth_dev_start(pi) < 0) { printf("Fail to start port %d\n", pi); @@ -1660,6 +2232,28 @@ stop_port(portid_t pid) printf("Done\n"); } +static void +remove_invalid_ports_in(portid_t *array, portid_t *total) +{ + portid_t i; + portid_t new_total = 0; + + for (i = 0; i < *total; i++) + if (!port_id_is_invalid(array[i], DISABLED_WARN)) { + array[new_total] = array[i]; + new_total++; + } + *total = new_total; +} + +static void +remove_invalid_ports(void) +{ + remove_invalid_ports_in(ports_ids, &nb_ports); + remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports); + nb_cfg_ports = nb_fwd_ports; +} + void close_port(portid_t pid) { @@ -1702,6 +2296,8 @@ close_port(portid_t pid) port_flow_flush(pi); rte_eth_dev_close(pi); + remove_invalid_ports(); + if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0) printf("Port %d cannot be set to closed\n", pi); @@ -1754,8 +2350,8 @@ reset_port(portid_t pid) void attach_port(char *identifier) { - portid_t pi = 0; - unsigned int socket_id; + portid_t pi; + struct rte_dev_iterator iterator; printf("Attaching a new port...\n"); @@ -1764,18 +2360,46 @@ attach_port(char *identifier) return; } - if (rte_eth_dev_attach(identifier, &pi)) + if (rte_dev_probe(identifier) != 0) { + TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier); + return; + } + + /* first attach mode: event */ + if (setup_on_probe_event) { + /* new ports are detected on RTE_ETH_EVENT_NEW event */ + for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++) + if (ports[pi].port_status == RTE_PORT_HANDLING && + ports[pi].need_setup != 0) + setup_attached_port(pi); return; + } + + /* second attach mode: iterator */ + RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) { + /* setup ports matching the devargs used for probing */ + if (port_is_forwarding(pi)) + continue; /* port was already attached before */ + setup_attached_port(pi); + } +} + +static void +setup_attached_port(portid_t pi) +{ + unsigned int socket_id; socket_id = (unsigned)rte_eth_dev_socket_id(pi); - /* if socket_id is invalid, set to 0 */ + /* if socket_id is invalid, set to the first available socket. 
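
The iterator branch of attach_port() above visits only the ports created by the given devargs, which is why previously attached ports are skipped via port_is_forwarding(). The same macro works in any application; sketch, with "net_tap0" as a made-up devargs string:

	struct rte_dev_iterator it;
	uint16_t pi;

	RTE_ETH_FOREACH_MATCHING_DEV(pi, "net_tap0", &it)
		printf("port %u was created by this probe\n", pi);
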
*/ if (check_socket_id(socket_id) < 0) - socket_id = 0; + socket_id = socket_ids[0]; reconfig(pi, socket_id); rte_eth_promiscuous_enable(pi); - nb_ports = rte_eth_dev_count(); - + ports_ids[nb_ports++] = pi; + fwd_ports_ids[nb_fwd_ports++] = pi; + nb_cfg_ports = nb_fwd_ports; + ports[pi].need_setup = 0; ports[pi].port_status = RTE_PORT_STOPPED; printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports); @@ -1783,29 +2407,47 @@ attach_port(char *identifier) } void -detach_port(uint8_t port_id) +detach_port_device(portid_t port_id) { - char name[RTE_ETH_NAME_MAX_LEN]; + struct rte_device *dev; + portid_t sibling; - printf("Detaching a port...\n"); + printf("Removing a device...\n"); - if (!port_is_closed(port_id)) { - printf("Please close port first\n"); + dev = rte_eth_devices[port_id].device; + if (dev == NULL) { + printf("Device already removed\n"); return; } - if (ports[port_id].flow_list) - port_flow_flush(port_id); + if (ports[port_id].port_status != RTE_PORT_CLOSED) { + if (ports[port_id].port_status != RTE_PORT_STOPPED) { + printf("Port not stopped\n"); + return; + } + printf("Port was not closed\n"); + if (ports[port_id].flow_list) + port_flow_flush(port_id); + } - if (rte_eth_dev_detach(port_id, name)) { - RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name); + if (rte_dev_remove(dev) != 0) { + TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name); return; } + RTE_ETH_FOREACH_DEV_OF(sibling, dev) { + /* reset mapping between old ports and removed device */ + rte_eth_devices[sibling].device = NULL; + if (ports[sibling].port_status != RTE_PORT_CLOSED) { + /* sibling ports are forced to be closed */ + ports[sibling].port_status = RTE_PORT_CLOSED; + printf("Port %u is closed\n", sibling); + } + } - nb_ports = rte_eth_dev_count(); + remove_invalid_ports(); - printf("Port '%s' is detached. Now total ports is %d\n", - name, nb_ports); + printf("Device of port %u is detached\n", port_id); + printf("Now total ports is %d\n", nb_ports); printf("Done\n"); return; } @@ -1813,20 +2455,75 @@ detach_port(uint8_t port_id) void pmd_test_exit(void) { + struct rte_device *device; portid_t pt_id; + int ret; + int i; if (test_done == 0) stop_packet_forwarding(); + for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + if (mempools[i]) { + if (mp_alloc_type == MP_ALLOC_ANON) + rte_mempool_mem_iter(mempools[i], dma_unmap_cb, + NULL); + } + } if (ports != NULL) { no_link_check = 1; RTE_ETH_FOREACH_DEV(pt_id) { - printf("\nShutting down port %d...\n", pt_id); + printf("\nStopping port %d...\n", pt_id); fflush(stdout); stop_port(pt_id); + } + RTE_ETH_FOREACH_DEV(pt_id) { + printf("\nShutting down port %d...\n", pt_id); + fflush(stdout); close_port(pt_id); + + /* + * This is a workaround to fix a virtio-user issue that + * requires to call clean-up routine to remove existing + * socket. + * This workaround valid only for testpmd, needs a fix + * valid for all applications. 
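
detach_port_device() above has to close every port sharing the underlying rte_device, not just the one passed in, hence the RTE_ETH_FOREACH_DEV_OF loop. A sketch of the same traversal, assuming dev points at a probed rte_device:

	portid_t sibling;
	unsigned int nb_siblings = 0;

	RTE_ETH_FOREACH_DEV_OF(sibling, dev)
		nb_siblings++;	/* ports backed by the same rte_device */
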
+ * TODO: Implement proper resource cleanup + */ + device = rte_eth_devices[pt_id].device; + if (device && !strcmp(device->driver->name, "net_virtio_user")) + detach_port_device(pt_id); + } + } + + if (hot_plug) { + ret = rte_dev_event_monitor_stop(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to stop device event monitor."); + return; + } + + ret = rte_dev_event_callback_unregister(NULL, + dev_event_callback, NULL); + if (ret < 0) { + RTE_LOG(ERR, EAL, + "fail to unregister device event callback.\n"); + return; + } + + ret = rte_dev_hotplug_handle_disable(); + if (ret) { + RTE_LOG(ERR, EAL, + "fail to disable hotplug handling.\n"); + return; } } + for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) { + if (mempools[i]) + rte_mempool_free(mempools[i]); + } + printf("\nBye...\n"); } @@ -1894,21 +2591,32 @@ check_all_ports_link_status(uint32_t port_mask) } } +/* + * This callback is for remove a port for a device. It has limitation because + * it is not for multiple port removal for a device. + * TODO: the device detach invoke will plan to be removed from user side to + * eal. And convert all PMDs to free port resources on ether device closing. + */ static void -rmv_event_callback(void *arg) +rmv_port_callback(void *arg) { - struct rte_eth_dev *dev; - uint8_t port_id = (intptr_t)arg; + int need_to_start = 0; + int org_no_link_check = no_link_check; + portid_t port_id = (intptr_t)arg; RTE_ETH_VALID_PORTID_OR_RET(port_id); - dev = &rte_eth_devices[port_id]; + if (!test_done && port_is_forwarding(port_id)) { + need_to_start = 1; + stop_packet_forwarding(); + } + no_link_check = 1; stop_port(port_id); + no_link_check = org_no_link_check; close_port(port_id); - printf("removing device %s\n", dev->device->name); - if (rte_eal_dev_detach(dev->device)) - RTE_LOG(ERR, USER1, "Failed to detach device %s\n", - dev->device->name); + detach_port_device(port_id); + if (need_to_start) + start_packet_forwarding(0); } /* This function is used by the interrupt thread */ @@ -1916,34 +2624,29 @@ static int eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, void *ret_param) { - static const char * const event_desc[] = { - [RTE_ETH_EVENT_UNKNOWN] = "Unknown", - [RTE_ETH_EVENT_INTR_LSC] = "LSC", - [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state", - [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset", - [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox", - [RTE_ETH_EVENT_MACSEC] = "MACsec", - [RTE_ETH_EVENT_INTR_RMV] = "device removal", - [RTE_ETH_EVENT_MAX] = NULL, - }; - RTE_SET_USED(param); RTE_SET_USED(ret_param); if (type >= RTE_ETH_EVENT_MAX) { - fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n", + fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n", port_id, __func__, type); fflush(stderr); } else if (event_print_mask & (UINT32_C(1) << type)) { - printf("\nPort %" PRIu8 ": %s event\n", port_id, - event_desc[type]); + printf("\nPort %" PRIu16 ": %s event\n", port_id, + eth_event_desc[type]); fflush(stdout); } switch (type) { + case RTE_ETH_EVENT_NEW: + ports[port_id].need_setup = 1; + ports[port_id].port_status = RTE_PORT_HANDLING; + break; case RTE_ETH_EVENT_INTR_RMV: + if (port_id_is_invalid(port_id, DISABLED_WARN)) + break; if (rte_eal_alarm_set(100000, - rmv_event_callback, (void *)(intptr_t)port_id)) + rmv_port_callback, (void *)(intptr_t)port_id)) fprintf(stderr, "Could not set up deferred device removal\n"); break; default: @@ -1953,7 +2656,79 @@ eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param, } static int 
 static int
-set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+register_eth_event_callback(void)
+{
+	int ret;
+	enum rte_eth_event_type event;
+
+	for (event = RTE_ETH_EVENT_UNKNOWN;
+			event < RTE_ETH_EVENT_MAX; event++) {
+		ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
+				event,
+				eth_event_callback,
+				NULL);
+		if (ret != 0) {
+			TESTPMD_LOG(ERR, "Failed to register callback for "
+					"%s event\n", eth_event_desc[event]);
+			return -1;
+		}
+	}
+
+	return 0;
+}
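register_eth_event_callback() above subscribes eth_event_callback to every event type on all ports. An application interested in a single event can register just that one; a short sketch against the same API (register_lsc_only() is a hypothetical helper, not testpmd code):

/* Sketch: subscribe to one event type instead of the whole range. */
#include <rte_ethdev.h>

static int
register_lsc_only(rte_eth_dev_cb_fn cb)
{
	/* RTE_ETH_ALL applies to every current and future port */
	return rte_eth_dev_callback_register(RTE_ETH_ALL,
					     RTE_ETH_EVENT_INTR_LSC,
					     cb, NULL);
}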
+
+/* This function is used by the interrupt thread */
+static void
+dev_event_callback(const char *device_name, enum rte_dev_event_type type,
+			     __rte_unused void *arg)
+{
+	uint16_t port_id;
+	int ret;
+
+	if (type >= RTE_DEV_EVENT_MAX) {
+		fprintf(stderr, "%s called upon invalid event %d\n",
+			__func__, type);
+		fflush(stderr);
+	}
+
+	switch (type) {
+	case RTE_DEV_EVENT_REMOVE:
+		RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
+			device_name);
+		ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
+		if (ret) {
+			RTE_LOG(ERR, EAL, "cannot get port by device %s!\n",
+				device_name);
+			return;
+		}
+		/*
+		 * The user's callback is invoked from the EAL interrupt
+		 * callback, and that interrupt callback must finish before
+		 * it can be unregistered while detaching the device. So
+		 * return from this callback quickly and detach the device
+		 * through a deferred removal. This is a workaround; once
+		 * device detaching is moved into the EAL, the deferred
+		 * removal can be deleted.
+		 */
+		if (rte_eal_alarm_set(100000,
+				rmv_port_callback, (void *)(intptr_t)port_id))
+			RTE_LOG(ERR, EAL,
+				"Could not set up deferred device removal\n");
+		break;
+	case RTE_DEV_EVENT_ADD:
+		RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
+			device_name);
+		/* TODO: after kernel driver binding finishes,
+		 * begin to attach the port.
+		 */
+		break;
+	default:
+		break;
+	}
+}
+
+static int
+set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
 {
 	uint16_t i;
 	int diag;
@@ -1976,7 +2751,7 @@ set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
 }
 
 static int
-set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
+set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
 {
 	uint16_t i;
 	int diag;
@@ -1999,7 +2774,7 @@ set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
 }
 
 static void
-map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
+map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
 {
 	int diag = 0;
@@ -2033,42 +2808,56 @@ map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
 static void
 rxtx_port_config(struct rte_port *port)
 {
-	port->rx_conf = port->dev_info.default_rxconf;
-	port->tx_conf = port->dev_info.default_txconf;
+	uint16_t qid;
+	uint64_t offloads;
+
+	for (qid = 0; qid < nb_rxq; qid++) {
+		offloads = port->rx_conf[qid].offloads;
+		port->rx_conf[qid] = port->dev_info.default_rxconf;
+		port->rx_conf[qid].offloads |= offloads;
+
+		/* Check if any Rx parameters have been passed */
+		if (rx_pthresh != RTE_PMD_PARAM_UNSET)
+			port->rx_conf[qid].rx_thresh.pthresh = rx_pthresh;
+
+		if (rx_hthresh != RTE_PMD_PARAM_UNSET)
+			port->rx_conf[qid].rx_thresh.hthresh = rx_hthresh;
 
-	/* Check if any RX/TX parameters have been passed */
-	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
-		port->rx_conf.rx_thresh.pthresh = rx_pthresh;
+		if (rx_wthresh != RTE_PMD_PARAM_UNSET)
+			port->rx_conf[qid].rx_thresh.wthresh = rx_wthresh;
 
-	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
-		port->rx_conf.rx_thresh.hthresh = rx_hthresh;
+		if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
+			port->rx_conf[qid].rx_free_thresh = rx_free_thresh;
 
-	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
-		port->rx_conf.rx_thresh.wthresh = rx_wthresh;
+		if (rx_drop_en != RTE_PMD_PARAM_UNSET)
+			port->rx_conf[qid].rx_drop_en = rx_drop_en;
 
-	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
-		port->rx_conf.rx_free_thresh = rx_free_thresh;
+		port->nb_rx_desc[qid] = nb_rxd;
+	}
 
-	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
-		port->rx_conf.rx_drop_en = rx_drop_en;
+	for (qid = 0; qid < nb_txq; qid++) {
+		offloads = port->tx_conf[qid].offloads;
+		port->tx_conf[qid] = port->dev_info.default_txconf;
+		port->tx_conf[qid].offloads |= offloads;
 
-	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.tx_thresh.pthresh = tx_pthresh;
+		/* Check if any Tx parameters have been passed */
+		if (tx_pthresh != RTE_PMD_PARAM_UNSET)
+			port->tx_conf[qid].tx_thresh.pthresh = tx_pthresh;
 
-	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.tx_thresh.hthresh = tx_hthresh;
+		if (tx_hthresh != RTE_PMD_PARAM_UNSET)
+			port->tx_conf[qid].tx_thresh.hthresh = tx_hthresh;
 
-	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.tx_thresh.wthresh = tx_wthresh;
+		if (tx_wthresh != RTE_PMD_PARAM_UNSET)
+			port->tx_conf[qid].tx_thresh.wthresh = tx_wthresh;
 
-	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.tx_rs_thresh = tx_rs_thresh;
+		if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
+			port->tx_conf[qid].tx_rs_thresh = tx_rs_thresh;
 
-	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.tx_free_thresh = tx_free_thresh;
+		if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
+			port->tx_conf[qid].tx_free_thresh = tx_free_thresh;
 
-	if (txq_flags != RTE_PMD_PARAM_UNSET)
-		port->tx_conf.txq_flags = txq_flags;
+		port->nb_tx_desc[qid] = nb_txd;
+	}
 }
 
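rxtx_port_config() now keeps one rte_eth_rxconf/txconf and one descriptor count per queue instead of a single per-port copy. A sketch of how such per-queue values would typically reach the PMD at queue-setup time; this is not testpmd's actual setup path, and setup_one_rx_queue()/mbuf_pool are illustrative assumptions:

/* Sketch: hand the per-queue config to the PMD. */
#include <rte_ethdev.h>
#include <rte_mempool.h>

#include "testpmd.h"	/* struct rte_port, portid_t, queueid_t */

static int
setup_one_rx_queue(portid_t pid, queueid_t qid, struct rte_port *port,
		   struct rte_mempool *mbuf_pool)
{
	/* nb_rx_desc[] and rx_conf[] were filled by rxtx_port_config() */
	return rte_eth_rx_queue_setup(pid, qid,
				      port->nb_rx_desc[qid],
				      rte_eth_dev_socket_id(pid),
				      &port->rx_conf[qid],
				      mbuf_pool);
}

Merging the previous offloads back in (the "offloads |= offloads" lines above) is what lets per-queue offload settings survive a reset to the driver defaults.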
 void
@@ -2079,11 +2868,12 @@ init_port_config(void)
 	RTE_ETH_FOREACH_DEV(pid) {
 		port = &ports[pid];
-		port->dev_conf.rxmode = rx_mode;
 		port->dev_conf.fdir_conf = fdir_conf;
+		rte_eth_dev_info_get(pid, &port->dev_info);
 		if (nb_rxq > 1) {
 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
-			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
+			port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
+				rss_hf & port->dev_info.flow_type_rss_offloads;
 		} else {
 			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
@@ -2113,17 +2903,6 @@ init_port_config(void)
 			(rte_eth_devices[pid].data->dev_flags &
 			RTE_ETH_DEV_INTR_RMV))
 			port->dev_conf.intr_conf.rmv = 1;
-
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
-		/* Detect softnic port */
-		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
-			port->softnic_enable = 1;
-			memset(&port->softport, 0, sizeof(struct softnic_port));
-
-			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
-				port->softport.tm_flag = 1;
-		}
-#endif
 	}
 }
 
@@ -2148,7 +2927,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
 	struct rte_port *port;
 
 	port = &ports[slave_pid];
-	return port->slave_flag;
+	if ((rte_eth_devices[slave_pid].data->dev_flags &
+	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+		return 1;
+	return 0;
 }
 
 const uint16_t vlan_tags[] = {
@@ -2159,12 +2941,14 @@ const uint16_t vlan_tags[] = {
 };
 
 static int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
 		 enum dcb_mode_enable dcb_mode,
 		 enum rte_eth_nb_tcs num_tcs,
 		 uint8_t pfc_en)
 {
 	uint8_t i;
+	int32_t rc;
+	struct rte_eth_rss_conf rss_conf;
 
 	/*
 	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
@@ -2191,8 +2975,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			vmdq_rx_conf->dcb_tc[i] = i;
-			vmdq_tx_conf->dcb_tc[i] = i;
+			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
+			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		/* set DCB mode of RX and TX of multiple queues */
@@ -2204,6 +2988,10 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
 		struct rte_eth_dcb_tx_conf *tx_conf =
 				&eth_conf->tx_adv_conf.dcb_tx_conf;
 
+		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
+		if (rc != 0)
+			return rc;
+
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
@@ -2211,8 +2999,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
 			rx_conf->dcb_tc[i] = i % num_tcs;
 			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
+
 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
-		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+		eth_conf->rx_adv_conf.rss_conf = rss_conf;
 		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
 	}
 
@@ -2242,19 +3031,19 @@ init_port_dcb_config(portid_t pid,
 
 	/* Enter DCB configuration status */
 	dcb_config = 1;
 
+	port_conf.rxmode = rte_port->dev_conf.rxmode;
+	port_conf.txmode = rte_port->dev_conf.txmode;
+
 	/* set configuration of DCB in vt mode and DCB in non-vt mode */
-	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
 	if (retval < 0)
 		return retval;
-	port_conf.rxmode.hw_vlan_filter = 1;
-
-	/**
-	 * Write the configuration into the device.
-	 * Set the numbers of RX & TX queues to 0, so
-	 * the RX & TX queues will not be setup.
-	 */
-	rte_eth_dev_configure(pid, 0, 0, &port_conf);
+	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 
+	/* re-configure the device. */
+	retval = rte_eth_dev_configure(pid, nb_rxq, nb_txq, &port_conf);
+	if (retval < 0)
+		return retval;
 
 	rte_eth_dev_info_get(pid, &rte_port->dev_info);
 
 	/* If dev_info.vmdq_pool_base is greater than 0,
@@ -2295,7 +3084,7 @@ init_port_dcb_config(portid_t pid,
 
 	rxtx_port_config(rte_port);
 	/* VLAN filter */
-	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
+	rte_port->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
 	for (i = 0; i < RTE_DIM(vlan_tags); i++)
 		rx_vft_set(pid, vlan_tags[i], 1);
 
@@ -2319,6 +3108,11 @@ init_port(void)
 			"rte_zmalloc(%d struct rte_port) failed\n",
 			RTE_MAX_ETHPORTS);
 	}
+
+	/* Initialize ports NUMA structures */
+	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
 }
 
 static void
@@ -2341,6 +3135,8 @@ print_stats(void)
 	printf("\nPort statistics ====================================");
 	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
 		nic_stats_display(fwd_ports_ids[i]);
+
+	fflush(stdout);
 }
 
 static void
@@ -2368,8 +3164,10 @@ signal_handler(int signum)
 int
 main(int argc, char** argv)
 {
-	int diag;
-	portid_t port_id;
+	int diag;
+	portid_t port_id;
+	uint16_t count;
+	int ret;
 
 	signal(SIGINT, signal_handler);
 	signal(SIGTERM, signal_handler);
@@ -2378,19 +3176,28 @@ main(int argc, char** argv)
 	if (diag < 0)
 		rte_panic("Cannot init EAL\n");
 
-	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
-		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
-			strerror(errno));
-	}
+	testpmd_logtype = rte_log_register("testpmd");
+	if (testpmd_logtype < 0)
+		rte_panic("Cannot register log type");
+	rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
+
+	ret = register_eth_event_callback();
+	if (ret != 0)
+		rte_panic("Cannot register for ethdev events");
 
 #ifdef RTE_LIBRTE_PDUMP
 	/* initialize packet capture framework */
-	rte_pdump_init(NULL);
+	rte_pdump_init();
 #endif
 
-	nb_ports = (portid_t) rte_eth_dev_count();
+	count = 0;
+	RTE_ETH_FOREACH_DEV(port_id) {
+		ports_ids[count] = port_id;
+		count++;
+	}
+	nb_ports = (portid_t) count;
 	if (nb_ports == 0)
-		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
+		TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
 
 	/* allocate port structures, and init them */
 	init_port();
@@ -2408,11 +3215,23 @@ main(int argc, char** argv)
 	latencystats_enabled = 0;
 #endif
 
+	/* on FreeBSD, mlockall() is disabled by default */
+#ifdef RTE_EXEC_ENV_FREEBSD
+	do_mlockall = 0;
+#else
+	do_mlockall = 1;
+#endif
+
 	argc -= diag;
 	argv += diag;
 	if (argc > 1)
 		launch_args_parse(argc, argv);
 
+	if (do_mlockall && mlockall(MCL_CURRENT | MCL_FUTURE)) {
+		TESTPMD_LOG(NOTICE, "mlockall() failed with error \"%s\"\n",
+			strerror(errno));
+	}
+
 	if (tx_first && interactive)
 		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
 				"interactive mode.\n");
@@ -2432,6 +3251,31 @@ main(int argc, char** argv)
 		nb_rxq, nb_txq);
 
 	init_config();
+
+	if (hot_plug) {
+		ret = rte_dev_hotplug_handle_enable();
+		if (ret) {
+			RTE_LOG(ERR, EAL,
+				"failed to enable hotplug handling.\n");
+			return -1;
+		}
+
+		ret = rte_dev_event_monitor_start();
+		if (ret) {
+			RTE_LOG(ERR, EAL,
+				"failed to start device event monitoring.\n");
+			return -1;
+		}
+
+		ret = rte_dev_event_callback_register(NULL,
+			dev_event_callback, NULL);
+		if (ret) {
+			RTE_LOG(ERR, EAL,
+				"failed to register device event callback\n");
+			return -1;
+		}
+	}
+
 	if (start_port(RTE_PORT_ALL) != 0)
 		rte_exit(EXIT_FAILURE, "Start ports failed\n");
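main() now registers a dedicated "testpmd" log type before doing anything else, so TESTPMD_LOG() output can be filtered independently of the EAL's own logs. The TESTPMD_LOG() macro itself lives in testpmd.h; the sketch below shows the same pattern in isolation, with the macro shape assumed from the usual DPDK convention rather than quoted from this diff (my_logtype, MY_LOG and init_logging are illustrative names):

/* Sketch: wiring up a dynamic log type, as main() does for "testpmd". */
#include <rte_debug.h>
#include <rte_log.h>

static int my_logtype;

#define MY_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, my_logtype, "myapp: " fmt, ## args)

static void
init_logging(void)
{
	my_logtype = rte_log_register("myapp");
	if (my_logtype < 0)
		rte_panic("Cannot register log type\n");
	rte_log_set_level(my_logtype, RTE_LOG_DEBUG);

	MY_LOG(INFO, "log type %d registered\n", my_logtype);
}

Note also the ordering the hot_plug block relies on: rte_dev_hotplug_handle_enable() first, then rte_dev_event_monitor_start(), then rte_dev_event_callback_register(); pmd_test_exit() earlier in this diff tears the same pieces down in reverse.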