#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
+#include <rte_malloc_heap.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include "testpmd.h"
+#ifndef MAP_HUGETLB
+/* FreeBSD may not have MAP_HUGETLB (in fact, it probably doesn't) */
+#define HUGE_FLAG (0x40000)
+#else
+#define HUGE_FLAG MAP_HUGETLB
+#endif
+
+#ifndef MAP_HUGE_SHIFT
+/* older kernels (or FreeBSD) will not have this define */
+#define HUGE_SHIFT (26)
+#else
+#define HUGE_SHIFT MAP_HUGE_SHIFT
+#endif
+
+#define EXTMEM_HEAP_NAME "extmem"
+
uint16_t verbose_level = 0; /**< Silent by default. */
int testpmd_logtype; /**< Log type for testpmd logs */
uint8_t socket_num = UMA_NO_CONFIG;
/*
- * Use ANONYMOUS mapped memory (might be not physically continuous) for mbufs.
+ * Select mempool allocation type:
+ * - native: use regular DPDK memory
+ * - anon: use regular DPDK memory to create mempool, but populate using
+ * anonymous memory (may not be IOVA-contiguous)
+ * - xmem: use externally allocated hugepage memory
*/
-uint8_t mp_anon = 0;
+uint8_t mp_alloc_type = MP_ALLOC_NATIVE;
/*
* Store specified sockets on which memory pool to be used by ports
* Must be instantiated with the ethernet addresses of peer traffic generator
* ports.
*/
-struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
+struct rte_ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores; /**< Number of probed logical cores. */
+portid_t ports_ids[RTE_MAX_ETHPORTS]; /**< Store all port ids. */
+
/*
* Test Forwarding Configuration.
* nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
&tx_only_engine,
&csum_fwd_engine,
&icmp_echo_engine,
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- &softnic_tm_engine,
- &softnic_tm_bypass_engine,
+ &noisy_vnf_engine,
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ &softnic_fwd_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
&ieee1588_fwd_engine,
NULL,
};
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+uint16_t mempool_flags;
+
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
+uint8_t txonly_multi_flow;
+/**< Whether multiple flows are generated in TXONLY mode. */
+
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
*/
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
+/*
+ * Configurable value of buffered packets before sending.
+ */
+uint16_t noisy_tx_sw_bufsz;
+
+/*
+ * Configurable value of packet buffer timeout.
+ */
+uint16_t noisy_tx_sw_buf_flush_time;
+
+/*
+ * Configurable value for size of VNF internal memory area
+ * used for simulating noisy neighbour behaviour
+ */
+uint64_t noisy_lkup_mem_sz;
+
+/*
+ * Configurable value of number of random writes done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_writes;
+
+/*
+ * Configurable value of number of random reads done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_reads;
+
+/*
+ * Configurable value of number of random reads/writes done in
+ * VNF simulation memory area.
+ */
+uint64_t noisy_lkup_num_reads_writes;
+
/*
* Receive Side Scaling (RSS) configuration.
*/
uint8_t hot_plug = 0; /**< hotplug disabled by default. */
+/* After attach, port setup is called on event or by iterator */
+bool setup_on_probe_event = true;
+
+/* Pretty printing of ethdev events */
+static const char * const eth_event_desc[] = {
+ [RTE_ETH_EVENT_UNKNOWN] = "unknown",
+ [RTE_ETH_EVENT_INTR_LSC] = "link state change",
+ [RTE_ETH_EVENT_QUEUE_STATE] = "queue state",
+ [RTE_ETH_EVENT_INTR_RESET] = "reset",
+ [RTE_ETH_EVENT_VF_MBOX] = "VF mbox",
+ [RTE_ETH_EVENT_IPSEC] = "IPsec",
+ [RTE_ETH_EVENT_MACSEC] = "MACsec",
+ [RTE_ETH_EVENT_INTR_RMV] = "device removal",
+ [RTE_ETH_EVENT_NEW] = "device probed",
+ [RTE_ETH_EVENT_DESTROY] = "device released",
+ [RTE_ETH_EVENT_MAX] = NULL,
+};
+
/*
* Display or mask ether events
* Default to all events except VF_MBOX
* Ethernet device configuration.
*/
struct rte_eth_rxmode rx_mode = {
- .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
- .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
- .ignore_offload_bitfield = 1,
+ .max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+ /**< Default maximum frame length. */
};
struct rte_eth_txmode tx_mode = {
.pballoc = RTE_FDIR_PBALLOC_64K,
.status = RTE_FDIR_REPORT_STATUS,
.mask = {
- .vlan_tci_mask = 0x0,
+ .vlan_tci_mask = 0xFFEF,
.ipv4_mask = {
.src_ip = 0xFFFFFFFF,
.dst_ip = 0xFFFFFFFF,
struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
+struct vxlan_encap_conf vxlan_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .select_tos_ttl = 0,
+ .vni = "\x00\x00\x00",
+ .udp_src = 0,
+ .udp_dst = RTE_BE16(4789),
+ .ipv4_src = RTE_IPV4(127, 0, 0, 1),
+ .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .ip_tos = 0,
+ .ip_ttl = 255,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
+struct nvgre_encap_conf nvgre_encap_conf = {
+ .select_ipv4 = 1,
+ .select_vlan = 0,
+ .tni = "\x00\x00\x00",
+ .ipv4_src = RTE_IPV4(127, 0, 0, 1),
+ .ipv4_dst = RTE_IPV4(255, 255, 255, 255),
+ .ipv6_src = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x00\x01",
+ .ipv6_dst = "\x00\x00\x00\x00\x00\x00\x00\x00"
+ "\x00\x00\x00\x00\x00\x00\x11\x11",
+ .vlan_tci = 0,
+ .eth_src = "\x00\x00\x00\x00\x00\x00",
+ .eth_dst = "\xff\xff\xff\xff\xff\xff",
+};
+
/* Forward function declarations */
+static void setup_attached_port(portid_t pi);
static void map_port_queue_stats_mapping_registers(portid_t pi,
struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
enum rte_eth_event_type type,
void *param, void *ret_param);
-static void eth_dev_event_callback(char *device_name,
+static void dev_event_callback(const char *device_name,
enum rte_dev_event_type type,
void *param);
-static int eth_dev_event_callback_register(void);
-static int eth_dev_event_callback_unregister(void);
-
/*
* Check if all the ports are started.
static int all_ports_started(void);
struct gso_status gso_ports[RTE_MAX_ETHPORTS];
-uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
+uint16_t gso_max_segment_size = RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN;
/*
* Helper function to check if socket is already discovered.
nb_lc = 0;
for (i = 0; i < RTE_MAX_LCORE; i++) {
+ if (!rte_lcore_is_enabled(i))
+ continue;
sock_num = rte_lcore_to_socket_id(i);
if (new_socket_id(sock_num)) {
if (num_sockets >= RTE_MAX_NUMA_NODES) {
}
socket_ids[num_sockets++] = sock_num;
}
- if (!rte_lcore_is_enabled(i))
- continue;
if (i == rte_get_master_lcore())
continue;
fwd_lcores_cpuids[nb_lc++] = i;
portid_t i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
+ peer_eth_addrs[i].addr_bytes[0] = RTE_ETHER_LOCAL_ADMIN_ADDR;
peer_eth_addrs[i].addr_bytes[5] = i;
}
}
portid_t pt_id;
int i = 0;
- RTE_ETH_FOREACH_DEV(pt_id)
+ RTE_ETH_FOREACH_DEV(pt_id) {
fwd_ports_ids[i++] = pt_id;
+ /* Update sockets info according to the attached device */
+ int socket_id = rte_eth_dev_socket_id(pt_id);
+ if (socket_id >= 0 && new_socket_id(socket_id)) {
+ if (num_sockets >= RTE_MAX_NUMA_NODES) {
+ rte_exit(EXIT_FAILURE,
+ "Total sockets greater than %u\n",
+ RTE_MAX_NUMA_NODES);
+ }
+ socket_ids[num_sockets++] = socket_id;
+ }
+ }
+
nb_cfg_ports = nb_ports;
nb_fwd_ports = nb_ports;
}
set_default_fwd_ports_config();
}
+/* extremely pessimistic estimation of memory required to create a mempool */
+static int
+calc_mem_size(uint32_t nb_mbufs, uint32_t mbuf_sz, size_t pgsz, size_t *out)
+{
+ unsigned int n_pages, mbuf_per_pg, leftover;
+ uint64_t total_mem, mbuf_mem, obj_sz;
+
+ /* there is no good way to predict how much space the mempool will
+ * occupy because it will allocate chunks on the fly, and some of those
+ * will come from default DPDK memory while some will come from our
+ * external memory, so just assume 128MB will be enough for everyone.
+ */
+ uint64_t hdr_mem = 128 << 20;
+
+ /* account for possible non-contiguousness */
+ obj_sz = rte_mempool_calc_obj_size(mbuf_sz, 0, NULL);
+ if (obj_sz > pgsz) {
+ TESTPMD_LOG(ERR, "Object size is bigger than page size\n");
+ return -1;
+ }
+
+ mbuf_per_pg = pgsz / obj_sz;
+ leftover = (nb_mbufs % mbuf_per_pg) > 0;
+ n_pages = (nb_mbufs / mbuf_per_pg) + leftover;
+
+ mbuf_mem = n_pages * pgsz;
+
+ total_mem = RTE_ALIGN(hdr_mem + mbuf_mem, pgsz);
+
+ if (total_mem > SIZE_MAX) {
+ TESTPMD_LOG(ERR, "Memory size too big\n");
+ return -1;
+ }
+ *out = (size_t)total_mem;
+
+ return 0;
+}
+
+static int
+pagesz_flags(uint64_t page_sz)
+{
+	/* as per the mmap() manpage, the hugepage size flag is the log2 of
+	 * the page size, shifted by MAP_HUGE_SHIFT
+ */
+ int log2 = rte_log2_u64(page_sz);
+
+ return (log2 << HUGE_SHIFT);
+}
+
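+/* mmap() an anonymous private mapping of 'memsz' bytes; when 'huge' is set,
+ * request hugepages of the given page size
+ */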
+static void *
+alloc_mem(size_t memsz, size_t pgsz, bool huge)
+{
+ void *addr;
+ int flags;
+
+ /* allocate anonymous hugepages */
+ flags = MAP_ANONYMOUS | MAP_PRIVATE;
+ if (huge)
+ flags |= HUGE_FLAG | pagesz_flags(pgsz);
+
+ addr = mmap(NULL, memsz, PROT_READ | PROT_WRITE, flags, -1, 0);
+ if (addr == MAP_FAILED)
+ return NULL;
+
+ return addr;
+}
+
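+/* description of an externally allocated memory area: base VA, length,
+ * page size and per-page IOVA table
+ */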
+struct extmem_param {
+ void *addr;
+ size_t len;
+ size_t pgsz;
+ rte_iova_t *iova_table;
+ unsigned int iova_table_len;
+};
+
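+/* allocate an anonymous memory area big enough for the mbufs, trying each
+ * candidate page size in turn, and build a per-page IOVA table for it
+ */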
+static int
+create_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, struct extmem_param *param,
+ bool huge)
+{
+ uint64_t pgsizes[] = {RTE_PGSIZE_2M, RTE_PGSIZE_1G, /* x86_64, ARM */
+ RTE_PGSIZE_16M, RTE_PGSIZE_16G}; /* POWER */
+ unsigned int cur_page, n_pages, pgsz_idx;
+ size_t mem_sz, cur_pgsz;
+ rte_iova_t *iovas = NULL;
+ void *addr;
+ int ret;
+
+ for (pgsz_idx = 0; pgsz_idx < RTE_DIM(pgsizes); pgsz_idx++) {
+ /* skip anything that is too big */
+ if (pgsizes[pgsz_idx] > SIZE_MAX)
+ continue;
+
+ cur_pgsz = pgsizes[pgsz_idx];
+
+ /* if we were told not to allocate hugepages, override */
+ if (!huge)
+ cur_pgsz = sysconf(_SC_PAGESIZE);
+
+ ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
+ return -1;
+ }
+
+ /* allocate our memory */
+ addr = alloc_mem(mem_sz, cur_pgsz, huge);
+
+ /* if we couldn't allocate memory with a specified page size,
+ * that doesn't mean we can't do it with other page sizes, so
+ * try another one.
+ */
+ if (addr == NULL)
+ continue;
+
+ /* store IOVA addresses for every page in this memory area */
+ n_pages = mem_sz / cur_pgsz;
+
+ iovas = malloc(sizeof(*iovas) * n_pages);
+
+ if (iovas == NULL) {
+ TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
+ goto fail;
+ }
+ /* lock memory if it's not huge pages */
+ if (!huge)
+ mlock(addr, mem_sz);
+
+ /* populate IOVA addresses */
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ rte_iova_t iova;
+ size_t offset;
+ void *cur;
+
+ offset = cur_pgsz * cur_page;
+ cur = RTE_PTR_ADD(addr, offset);
+
+ /* touch the page before getting its IOVA */
+ *(volatile char *)cur = 0;
+
+ iova = rte_mem_virt2iova(cur);
+
+ iovas[cur_page] = iova;
+ }
+
+ break;
+ }
+ /* if we couldn't allocate anything */
+ if (iovas == NULL)
+ return -1;
+
+ param->addr = addr;
+ param->len = mem_sz;
+ param->pgsz = cur_pgsz;
+ param->iova_table = iovas;
+ param->iova_table_len = n_pages;
+
+ return 0;
+fail:
+ if (iovas)
+ free(iovas);
+ if (addr)
+ munmap(addr, mem_sz);
+
+ return -1;
+}
+
+static int
+setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
+{
+ struct extmem_param param;
+ int socket_id, ret;
+
+	memset(&param, 0, sizeof(param));
+
+ /* check if our heap exists */
+ socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (socket_id < 0) {
+ /* create our heap */
+ ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create heap\n");
+ return -1;
+ }
+ }
+
+	ret = create_extmem(nb_mbufs, mbuf_sz, &param, huge);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create memory area\n");
+ return -1;
+ }
+
+ /* we now have a valid memory area, so add it to heap */
+ ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
+ param.addr, param.len, param.iova_table,
+ param.iova_table_len, param.pgsz);
+
+ /* when using VFIO, memory is automatically mapped for DMA by EAL */
+
+ /* not needed any more */
+ free(param.iova_table);
+
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
+ munmap(param.addr, param.len);
+ return -1;
+ }
+
+ /* success */
+
+ TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
+ param.len >> 20);
+
+ return 0;
+}
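+
+/* mempool chunk callback: DMA-unmap the chunk from every port's device,
+ * then unregister it from DPDK
+ */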
+static void
+dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ ret = rte_dev_dma_unmap(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA unmap addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+ ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to un-register addr 0x%p\n", memhdr->addr);
+ }
+}
+
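+/* mempool chunk callback: register the chunk with DPDK, then DMA-map it
+ * for every port's device
+ */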
+static void
+dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ int ret;
+
+ ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
+ page_size);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to register addr 0x%p\n", memhdr->addr);
+ return;
+ }
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev *dev =
+ &rte_eth_devices[pid];
+
+ ret = rte_dev_dma_map(dev->device, memhdr->addr, 0,
+ memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA map addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev->data->name);
+ }
+ }
+}
+
/*
* Configuration initialisation done once at init time.
*/
-static void
+static struct rte_mempool *
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
unsigned int socket_id)
{
"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
pool_name, nb_mbuf, mbuf_seg_size, socket_id);
- if (mp_anon != 0) {
- rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
- mb_size, (unsigned) mb_mempool_cache,
- sizeof(struct rte_pktmbuf_pool_private),
- socket_id, 0);
- if (rte_mp == NULL)
- goto err;
-
- if (rte_mempool_populate_anon(rte_mp) == 0) {
- rte_mempool_free(rte_mp);
- rte_mp = NULL;
- goto err;
- }
- rte_pktmbuf_pool_init(rte_mp, NULL);
- rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
- } else {
- /* wrapper to rte_mempool_create() */
- TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
- rte_mbuf_best_mempool_ops());
- rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
- mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ switch (mp_alloc_type) {
+ case MP_ALLOC_NATIVE:
+ {
+ /* wrapper to rte_mempool_create() */
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ break;
+ }
+ case MP_ALLOC_ANON:
+ {
+ rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
+ mb_size, (unsigned int) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ socket_id, mempool_flags);
+ if (rte_mp == NULL)
+ goto err;
+
+ if (rte_mempool_populate_anon(rte_mp) == 0) {
+ rte_mempool_free(rte_mp);
+ rte_mp = NULL;
+ goto err;
+ }
+ rte_pktmbuf_pool_init(rte_mp, NULL);
+ rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
+ break;
+ }
+ case MP_ALLOC_XMEM:
+ case MP_ALLOC_XMEM_HUGE:
+ {
+ int heap_socket;
+ bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
+
+ if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
+ rte_exit(EXIT_FAILURE, "Could not create external memory\n");
+
+ heap_socket =
+ rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (heap_socket < 0)
+ rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
+
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size,
+ heap_socket);
+ break;
+ }
+ default:
+ {
+ rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
+ }
}
err:
} else if (verbose_level > 0) {
rte_mempool_dump(stdout, rte_mp);
}
+ return rte_mp;
}
/*
uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
struct rte_gro_param gro_param;
uint32_t gso_types;
+ uint16_t data_size;
+ bool warning = 0;
+ int k;
memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
- if (numa_support) {
- memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
- }
-
/* Configuration of logical cores. */
fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
sizeof(struct fwd_lcore *) * nb_lcores,
port->dev_conf.txmode = tx_mode;
port->dev_conf.rxmode = rx_mode;
rte_eth_dev_info_get(pid, &port->dev_info);
+
if (!(port->dev_info.tx_offload_capa &
DEV_TX_OFFLOAD_MBUF_FAST_FREE))
port->dev_conf.txmode.offloads &=
~DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ if (!(port->dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_MATCH_METADATA))
+ port->dev_conf.txmode.offloads &=
+ ~DEV_TX_OFFLOAD_MATCH_METADATA;
if (numa_support) {
if (port_numa[pid] != NUMA_NO_CONFIG)
port_per_socket[port_numa[pid]]++;
else {
uint32_t socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
port_per_socket[socket_id]++;
}
}
+ /* Apply Rx offloads configuration */
+ for (k = 0; k < port->dev_info.max_rx_queues; k++)
+ port->rx_conf[k].offloads =
+ port->dev_conf.rxmode.offloads;
+ /* Apply Tx offloads configuration */
+ for (k = 0; k < port->dev_info.max_tx_queues; k++)
+ port->tx_conf[k].offloads =
+ port->dev_conf.txmode.offloads;
+
/* set flag to initialize port/queue */
port->need_reconfig = 1;
port->need_reconfig_queues = 1;
+ port->tx_metadata = 0;
+
+ /* Check for maximum number of segments per MTU. Accordingly
+ * update the mbuf data size.
+ */
+ if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+ port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+ data_size = rx_mode.max_rx_pkt_len /
+ port->dev_info.rx_desc_lim.nb_mtu_seg_max;
+
+ if ((data_size + RTE_PKTMBUF_HEADROOM) >
+ mbuf_data_size) {
+ mbuf_data_size = data_size +
+ RTE_PKTMBUF_HEADROOM;
+ warning = 1;
+ }
+ }
}
+ if (warning)
+ TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
+ mbuf_data_size);
+
/*
* Create pools of mbuf.
* If NUMA support is disabled, create a single pool of mbuf in
uint8_t i;
for (i = 0; i < num_sockets; i++)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_ids[i]);
+ mempools[i] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_ids[i]);
} else {
if (socket_num == UMA_NO_CONFIG)
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+ mempools[0] = mbuf_pool_create(mbuf_data_size,
+ nb_mbuf_per_pool, 0);
else
- mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
- socket_num);
+ mempools[socket_num] = mbuf_pool_create
+ (mbuf_data_size,
+ nb_mbuf_per_pool,
+ socket_num);
}
init_port_config();
gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
- DEV_TX_OFFLOAD_GRE_TNL_TSO;
+ DEV_TX_OFFLOAD_GRE_TNL_TSO | DEV_TX_OFFLOAD_UDP_TSO;
/*
* Records which Mbuf pool to use by each logical core, if needed.
*/
fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
- fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
- ETHER_CRC_LEN;
+ fwd_lcores[lc_id]->gso_ctx.gso_size = RTE_ETHER_MAX_LEN -
+ RTE_ETHER_CRC_LEN;
fwd_lcores[lc_id]->gso_ctx.flag = 0;
}
"rte_gro_ctx_create() failed\n");
}
}
+
+#if defined RTE_LIBRTE_PMD_SOFTNIC
+ if (strcmp(cur_fwd_eng->fwd_mode_name, "softnic") == 0) {
+ RTE_ETH_FOREACH_DEV(pid) {
+ port = &ports[pid];
+ const char *driver = port->dev_info.driver_name;
+
+ if (strcmp(driver, "net_softnic") == 0)
+ port->softport.fwd_lcore_arg = fwd_lcores;
+ }
+ }
+#endif
+
}
else {
port->socket_id = rte_eth_dev_socket_id(pid);
- /* if socket_id is invalid, set to 0 */
+ /*
+ * if socket_id is invalid,
+ * set to the first available socket.
+ */
if (check_socket_id(port->socket_id) < 0)
- port->socket_id = 0;
+ port->socket_id = socket_ids[0];
}
}
else {
/* init new */
nb_fwd_streams = nb_fwd_streams_new;
- fwd_streams = rte_zmalloc("testpmd: fwd_streams",
- sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
- if (fwd_streams == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
- "failed\n", nb_fwd_streams);
+ if (nb_fwd_streams) {
+ fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+ sizeof(struct fwd_stream *) * nb_fwd_streams,
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
+ " (struct fwd_stream *)) failed\n",
+ nb_fwd_streams);
- for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
- fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
- sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
- if (fwd_streams[sm_id] == NULL)
- rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
- " failed\n");
+ for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+ fwd_streams[sm_id] = rte_zmalloc("testpmd:"
+ " struct fwd_stream", sizeof(struct fwd_stream),
+ RTE_CACHE_LINE_SIZE);
+ if (fwd_streams[sm_id] == NULL)
+ rte_exit(EXIT_FAILURE, "rte_zmalloc"
+ "(struct fwd_stream) failed\n");
+ }
}
return 0;
pktnb_stats[1] = pktnb_stats[0];
burst_stats[0] = nb_burst;
pktnb_stats[0] = nb_pkt;
+ } else if (nb_burst > burst_stats[1]) {
+ burst_stats[1] = nb_burst;
+ pktnb_stats[1] = nb_pkt;
}
}
if (total_burst == 0)
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
-fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
+fwd_stream_stats_display(streamid_t stream_id)
{
- struct rte_port *port;
- uint8_t i;
+ struct fwd_stream *fs;
+ static const char *fwd_top_stats_border = "-------";
+
+ fs = fwd_streams[stream_id];
+ if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
+ (fs->fwd_dropped == 0))
+ return;
+ printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
+ "TX Port=%2d/Queue=%2d %s\n",
+ fwd_top_stats_border, fs->rx_port, fs->rx_queue,
+ fs->tx_port, fs->tx_queue, fwd_top_stats_border);
+ printf(" RX-packets: %-14"PRIu64" TX-packets: %-14"PRIu64
+ " TX-dropped: %-14"PRIu64,
+ fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
+
+ /* if checksum mode */
+ if (cur_fwd_eng == &csum_fwd_engine) {
+ printf(" RX- bad IP checksum: %-14"PRIu64
+ " Rx- bad L4 checksum: %-14"PRIu64
+ " Rx- bad outer L4 checksum: %-14"PRIu64"\n",
+ fs->rx_bad_ip_csum, fs->rx_bad_l4_csum,
+ fs->rx_bad_outer_l4_csum);
+ } else {
+ printf("\n");
+ }
+#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
+ pkt_burst_stats_display("RX", &fs->rx_burst_stats);
+ pkt_burst_stats_display("TX", &fs->tx_burst_stats);
+#endif
+}
+
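+/* display per-port and accumulated forwarding statistics for the last run */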
+void
+fwd_stats_display(void)
+{
static const char *fwd_stats_border = "----------------------";
+ static const char *acc_stats_border = "+++++++++++++++";
+ struct {
+ struct fwd_stream *rx_stream;
+ struct fwd_stream *tx_stream;
+ uint64_t tx_dropped;
+ uint64_t rx_bad_ip_csum;
+ uint64_t rx_bad_l4_csum;
+ uint64_t rx_bad_outer_l4_csum;
+ } ports_stats[RTE_MAX_ETHPORTS];
+ uint64_t total_rx_dropped = 0;
+ uint64_t total_tx_dropped = 0;
+ uint64_t total_rx_nombuf = 0;
+ struct rte_eth_stats stats;
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ uint64_t fwd_cycles = 0;
+#endif
+ uint64_t total_recv = 0;
+ uint64_t total_xmit = 0;
+ struct rte_port *port;
+ streamid_t sm_id;
+ portid_t pt_id;
+ int i;
- port = &ports[port_id];
- printf("\n %s Forward statistics for port %-2d %s\n",
- fwd_stats_border, port_id, fwd_stats_border);
+ memset(ports_stats, 0, sizeof(ports_stats));
- if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
- printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
- "%-"PRIu64"\n",
- stats->ipackets, stats->imissed,
- (uint64_t) (stats->ipackets + stats->imissed));
+ for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+ struct fwd_stream *fs = fwd_streams[sm_id];
- if (cur_fwd_eng == &csum_fwd_engine)
- printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
- port->rx_bad_ip_csum, port->rx_bad_l4_csum);
- if ((stats->ierrors + stats->rx_nombuf) > 0) {
- printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
- printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
+ if (cur_fwd_config.nb_fwd_streams >
+ cur_fwd_config.nb_fwd_ports) {
+ fwd_stream_stats_display(sm_id);
+ } else {
+ ports_stats[fs->tx_port].tx_stream = fs;
+ ports_stats[fs->rx_port].rx_stream = fs;
}
- printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
- "%-"PRIu64"\n",
- stats->opackets, port->tx_dropped,
- (uint64_t) (stats->opackets + port->tx_dropped));
- }
- else {
- printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
- "%14"PRIu64"\n",
- stats->ipackets, stats->imissed,
- (uint64_t) (stats->ipackets + stats->imissed));
+ ports_stats[fs->tx_port].tx_dropped += fs->fwd_dropped;
- if (cur_fwd_eng == &csum_fwd_engine)
- printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
- port->rx_bad_ip_csum, port->rx_bad_l4_csum);
- if ((stats->ierrors + stats->rx_nombuf) > 0) {
- printf(" RX-error:%"PRIu64"\n", stats->ierrors);
- printf(" RX-nombufs: %14"PRIu64"\n",
- stats->rx_nombuf);
- }
+ ports_stats[fs->rx_port].rx_bad_ip_csum += fs->rx_bad_ip_csum;
+ ports_stats[fs->rx_port].rx_bad_l4_csum += fs->rx_bad_l4_csum;
+ ports_stats[fs->rx_port].rx_bad_outer_l4_csum +=
+ fs->rx_bad_outer_l4_csum;
- printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
- "%14"PRIu64"\n",
- stats->opackets, port->tx_dropped,
- (uint64_t) (stats->opackets + port->tx_dropped));
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ fwd_cycles += fs->core_cycles;
+#endif
}
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+ uint8_t j;
+
+ pt_id = fwd_ports_ids[i];
+ port = &ports[pt_id];
+
+ rte_eth_stats_get(pt_id, &stats);
+ stats.ipackets -= port->stats.ipackets;
+ stats.opackets -= port->stats.opackets;
+ stats.ibytes -= port->stats.ibytes;
+ stats.obytes -= port->stats.obytes;
+ stats.imissed -= port->stats.imissed;
+ stats.oerrors -= port->stats.oerrors;
+ stats.rx_nombuf -= port->stats.rx_nombuf;
+
+ total_recv += stats.ipackets;
+ total_xmit += stats.opackets;
+ total_rx_dropped += stats.imissed;
+ total_tx_dropped += ports_stats[pt_id].tx_dropped;
+ total_tx_dropped += stats.oerrors;
+ total_rx_nombuf += stats.rx_nombuf;
+
+ printf("\n %s Forward statistics for port %-2d %s\n",
+ fwd_stats_border, pt_id, fwd_stats_border);
+
+ if (!port->rx_queue_stats_mapping_enabled &&
+ !port->tx_queue_stats_mapping_enabled) {
+ printf(" RX-packets: %-14"PRIu64
+ " RX-dropped: %-14"PRIu64
+ "RX-total: %-"PRIu64"\n",
+ stats.ipackets, stats.imissed,
+ stats.ipackets + stats.imissed);
+
+ if (cur_fwd_eng == &csum_fwd_engine)
+ printf(" Bad-ipcsum: %-14"PRIu64
+ " Bad-l4csum: %-14"PRIu64
+ "Bad-outer-l4csum: %-14"PRIu64"\n",
+ ports_stats[pt_id].rx_bad_ip_csum,
+ ports_stats[pt_id].rx_bad_l4_csum,
+ ports_stats[pt_id].rx_bad_outer_l4_csum);
+ if (stats.ierrors + stats.rx_nombuf > 0) {
+ printf(" RX-error: %-"PRIu64"\n",
+ stats.ierrors);
+ printf(" RX-nombufs: %-14"PRIu64"\n",
+ stats.rx_nombuf);
+ }
+
+ printf(" TX-packets: %-14"PRIu64
+ " TX-dropped: %-14"PRIu64
+ "TX-total: %-"PRIu64"\n",
+ stats.opackets, ports_stats[pt_id].tx_dropped,
+ stats.opackets + ports_stats[pt_id].tx_dropped);
+ } else {
+ printf(" RX-packets: %14"PRIu64
+ " RX-dropped:%14"PRIu64
+ " RX-total:%14"PRIu64"\n",
+ stats.ipackets, stats.imissed,
+ stats.ipackets + stats.imissed);
+
+ if (cur_fwd_eng == &csum_fwd_engine)
+ printf(" Bad-ipcsum:%14"PRIu64
+ " Bad-l4csum:%14"PRIu64
+ " Bad-outer-l4csum: %-14"PRIu64"\n",
+ ports_stats[pt_id].rx_bad_ip_csum,
+ ports_stats[pt_id].rx_bad_l4_csum,
+ ports_stats[pt_id].rx_bad_outer_l4_csum);
+ if ((stats.ierrors + stats.rx_nombuf) > 0) {
+ printf(" RX-error:%"PRIu64"\n", stats.ierrors);
+ printf(" RX-nombufs: %14"PRIu64"\n",
+ stats.rx_nombuf);
+ }
+
+ printf(" TX-packets: %14"PRIu64
+ " TX-dropped:%14"PRIu64
+ " TX-total:%14"PRIu64"\n",
+ stats.opackets, ports_stats[pt_id].tx_dropped,
+ stats.opackets + ports_stats[pt_id].tx_dropped);
+ }
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- if (port->rx_stream)
- pkt_burst_stats_display("RX",
- &port->rx_stream->rx_burst_stats);
- if (port->tx_stream)
- pkt_burst_stats_display("TX",
- &port->tx_stream->tx_burst_stats);
+ if (ports_stats[pt_id].rx_stream)
+ pkt_burst_stats_display("RX",
+ &ports_stats[pt_id].rx_stream->rx_burst_stats);
+ if (ports_stats[pt_id].tx_stream)
+ pkt_burst_stats_display("TX",
+ &ports_stats[pt_id].tx_stream->tx_burst_stats);
#endif
- if (port->rx_queue_stats_mapping_enabled) {
- printf("\n");
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
- printf(" Stats reg %2d RX-packets:%14"PRIu64
- " RX-errors:%14"PRIu64
- " RX-bytes:%14"PRIu64"\n",
- i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
+ if (port->rx_queue_stats_mapping_enabled) {
+ printf("\n");
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+ printf(" Stats reg %2d RX-packets:%14"PRIu64
+ " RX-errors:%14"PRIu64
+ " RX-bytes:%14"PRIu64"\n",
+ j, stats.q_ipackets[j],
+ stats.q_errors[j], stats.q_ibytes[j]);
+ }
+ printf("\n");
}
- printf("\n");
- }
- if (port->tx_queue_stats_mapping_enabled) {
- for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
- printf(" Stats reg %2d TX-packets:%14"PRIu64
- " TX-bytes:%14"PRIu64"\n",
- i, stats->q_opackets[i], stats->q_obytes[i]);
+ if (port->tx_queue_stats_mapping_enabled) {
+ for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) {
+ printf(" Stats reg %2d TX-packets:%14"PRIu64
+ " TX-bytes:%14"
+ PRIu64"\n",
+ j, stats.q_opackets[j],
+ stats.q_obytes[j]);
+ }
}
+
+ printf(" %s--------------------------------%s\n",
+ fwd_stats_border, fwd_stats_border);
}
- printf(" %s--------------------------------%s\n",
- fwd_stats_border, fwd_stats_border);
+ printf("\n %s Accumulated forward statistics for all ports"
+ "%s\n",
+ acc_stats_border, acc_stats_border);
+ printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
+ "%-"PRIu64"\n"
+ " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
+ "%-"PRIu64"\n",
+ total_recv, total_rx_dropped, total_recv + total_rx_dropped,
+ total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
+ if (total_rx_nombuf > 0)
+ printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
+ printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
+ "%s\n",
+ acc_stats_border, acc_stats_border);
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ if (total_recv > 0)
+ printf("\n CPU cycles/packet=%u (total cycles="
+ "%"PRIu64" / total RX packets=%"PRIu64")\n",
+ (unsigned int)(fwd_cycles / total_recv),
+ fwd_cycles, total_recv);
+#endif
}
-static void
-fwd_stream_stats_display(streamid_t stream_id)
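+/* snapshot current port stats as the baseline and clear per-stream counters */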
+void
+fwd_stats_reset(void)
{
- struct fwd_stream *fs;
- static const char *fwd_top_stats_border = "-------";
-
- fs = fwd_streams[stream_id];
- if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
- (fs->fwd_dropped == 0))
- return;
- printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
- "TX Port=%2d/Queue=%2d %s\n",
- fwd_top_stats_border, fs->rx_port, fs->rx_queue,
- fs->tx_port, fs->tx_queue, fwd_top_stats_border);
- printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
- fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
+ streamid_t sm_id;
+ portid_t pt_id;
+ int i;
- /* if checksum mode */
- if (cur_fwd_eng == &csum_fwd_engine) {
- printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
- "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
+ for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
+ pt_id = fwd_ports_ids[i];
+ rte_eth_stats_get(pt_id, &ports[pt_id].stats);
}
+ for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
+ struct fwd_stream *fs = fwd_streams[sm_id];
+
+ fs->rx_packets = 0;
+ fs->tx_packets = 0;
+ fs->fwd_dropped = 0;
+ fs->rx_bad_ip_csum = 0;
+ fs->rx_bad_l4_csum = 0;
+ fs->rx_bad_outer_l4_csum = 0;
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- pkt_burst_stats_display("RX", &fs->rx_burst_stats);
- pkt_burst_stats_display("TX", &fs->tx_burst_stats);
+ memset(&fs->rx_burst_stats, 0, sizeof(fs->rx_burst_stats));
+ memset(&fs->tx_burst_stats, 0, sizeof(fs->tx_burst_stats));
+#endif
+#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
+ fs->core_cycles = 0;
#endif
+ }
}
static void
uint64_t tics_per_1sec;
uint64_t tics_datum;
uint64_t tics_current;
- uint16_t idx_port;
+ uint16_t i, cnt_ports;
+ cnt_ports = nb_ports;
tics_datum = rte_rdtsc();
tics_per_1sec = rte_get_timer_hz();
#endif
tics_current = rte_rdtsc();
if (tics_current - tics_datum >= tics_per_1sec) {
/* Periodic bitrate calculation */
- RTE_ETH_FOREACH_DEV(idx_port)
+ for (i = 0; i < cnt_ports; i++)
rte_stats_bitrate_calc(bitrate_data,
- idx_port);
+ ports_ids[i]);
tics_datum = tics_current;
}
}
struct rte_port *port;
unsigned int i;
portid_t pt_id;
- streamid_t sm_id;
if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
return;
}
- if (init_fwd_streams() < 0) {
- printf("Fail from init_fwd_streams()\n");
- return;
- }
if(dcb_test) {
for (i = 0; i < nb_fwd_ports; i++) {
}
test_done = 0;
+ fwd_config_setup();
+
if(!no_flush_rx)
flush_fwd_rx_queues();
- fwd_config_setup();
pkt_fwd_config_display(&cur_fwd_config);
rxtx_config_display();
+ fwd_stats_reset();
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
pt_id = fwd_ports_ids[i];
port = &ports[pt_id];
- rte_eth_stats_get(pt_id, &port->stats);
- port->tx_dropped = 0;
-
map_port_queue_stats_mapping_registers(pt_id, port);
}
- for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
- fwd_streams[sm_id]->rx_packets = 0;
- fwd_streams[sm_id]->tx_packets = 0;
- fwd_streams[sm_id]->fwd_dropped = 0;
- fwd_streams[sm_id]->rx_bad_ip_csum = 0;
- fwd_streams[sm_id]->rx_bad_l4_csum = 0;
-
-#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
- memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
- sizeof(fwd_streams[sm_id]->rx_burst_stats));
- memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
- sizeof(fwd_streams[sm_id]->tx_burst_stats));
-#endif
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- fwd_streams[sm_id]->core_cycles = 0;
-#endif
- }
if (with_tx_first) {
port_fwd_begin = tx_only_engine.port_fwd_begin;
if (port_fwd_begin != NULL) {
void
stop_packet_forwarding(void)
{
- struct rte_eth_stats stats;
- struct rte_port *port;
- port_fwd_end_t port_fwd_end;
+ port_fwd_end_t port_fwd_end;
+ lcoreid_t lc_id;
+ portid_t pt_id;
int i;
- portid_t pt_id;
- streamid_t sm_id;
- lcoreid_t lc_id;
- uint64_t total_recv;
- uint64_t total_xmit;
- uint64_t total_rx_dropped;
- uint64_t total_tx_dropped;
- uint64_t total_rx_nombuf;
- uint64_t tx_dropped;
- uint64_t rx_bad_ip_csum;
- uint64_t rx_bad_l4_csum;
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- uint64_t fwd_cycles;
-#endif
-
- static const char *acc_stats_border = "+++++++++++++++";
if (test_done) {
printf("Packet forwarding not started\n");
(*port_fwd_end)(pt_id);
}
}
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- fwd_cycles = 0;
-#endif
- for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
- if (cur_fwd_config.nb_fwd_streams >
- cur_fwd_config.nb_fwd_ports) {
- fwd_stream_stats_display(sm_id);
- ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
- ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
- } else {
- ports[fwd_streams[sm_id]->tx_port].tx_stream =
- fwd_streams[sm_id];
- ports[fwd_streams[sm_id]->rx_port].rx_stream =
- fwd_streams[sm_id];
- }
- tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
- tx_dropped = (uint64_t) (tx_dropped +
- fwd_streams[sm_id]->fwd_dropped);
- ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
-
- rx_bad_ip_csum =
- ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
- rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
- fwd_streams[sm_id]->rx_bad_ip_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
- rx_bad_ip_csum;
-
- rx_bad_l4_csum =
- ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
- rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
- fwd_streams[sm_id]->rx_bad_l4_csum);
- ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
- rx_bad_l4_csum;
-
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- fwd_cycles = (uint64_t) (fwd_cycles +
- fwd_streams[sm_id]->core_cycles);
-#endif
- }
- total_recv = 0;
- total_xmit = 0;
- total_rx_dropped = 0;
- total_tx_dropped = 0;
- total_rx_nombuf = 0;
- for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
- pt_id = fwd_ports_ids[i];
-
- port = &ports[pt_id];
- rte_eth_stats_get(pt_id, &stats);
- stats.ipackets -= port->stats.ipackets;
- port->stats.ipackets = 0;
- stats.opackets -= port->stats.opackets;
- port->stats.opackets = 0;
- stats.ibytes -= port->stats.ibytes;
- port->stats.ibytes = 0;
- stats.obytes -= port->stats.obytes;
- port->stats.obytes = 0;
- stats.imissed -= port->stats.imissed;
- port->stats.imissed = 0;
- stats.oerrors -= port->stats.oerrors;
- port->stats.oerrors = 0;
- stats.rx_nombuf -= port->stats.rx_nombuf;
- port->stats.rx_nombuf = 0;
-
- total_recv += stats.ipackets;
- total_xmit += stats.opackets;
- total_rx_dropped += stats.imissed;
- total_tx_dropped += port->tx_dropped;
- total_rx_nombuf += stats.rx_nombuf;
- fwd_port_stats_display(pt_id, &stats);
- }
+ fwd_stats_display();
- printf("\n %s Accumulated forward statistics for all ports"
- "%s\n",
- acc_stats_border, acc_stats_border);
- printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
- "%-"PRIu64"\n"
- " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
- "%-"PRIu64"\n",
- total_recv, total_rx_dropped, total_recv + total_rx_dropped,
- total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
- if (total_rx_nombuf > 0)
- printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
- printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
- "%s\n",
- acc_stats_border, acc_stats_border);
-#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
- if (total_recv > 0)
- printf("\n CPU cycles/packet=%u (total cycles="
- "%"PRIu64" / total RX packets=%"PRIu64")\n",
- (unsigned int)(fwd_cycles / total_recv),
- fwd_cycles, total_recv);
-#endif
printf("\nDone.\n");
test_done = 1;
}
return 1;
}
-static int
-port_is_closed(portid_t port_id)
-{
- if (port_id_is_invalid(port_id, ENABLED_WARN))
- return 0;
-
- if (ports[port_id].port_status != RTE_PORT_CLOSED)
- return 0;
-
- return 1;
-}
-
int
start_port(portid_t pid)
{
portid_t pi;
queueid_t qi;
struct rte_port *port;
- struct ether_addr mac_addr;
- enum rte_eth_event_type event_type;
+ struct rte_ether_addr mac_addr;
if (port_id_is_invalid(pid, ENABLED_WARN))
return 0;
return -1;
}
}
-
+ configure_rxtx_dump_callbacks(0);
printf("Configuring Port %d (socket %u)\n", pi,
port->socket_id);
/* configure port */
port->need_reconfig_queues = 0;
/* setup tx queues */
for (qi = 0; qi < nb_txq; qi++) {
- port->tx_conf[qi].txq_flags =
- ETH_TXQ_FLAGS_IGNORE;
- /* Apply Tx offloads configuration */
- port->tx_conf[qi].offloads =
- port->dev_conf.txmode.offloads;
if ((numa_support) &&
(txring_numa[pi] != NUMA_NO_CONFIG))
diag = rte_eth_tx_queue_setup(pi, qi,
return -1;
}
for (qi = 0; qi < nb_rxq; qi++) {
- /* Apply Rx offloads configuration */
- port->rx_conf[qi].offloads =
- port->dev_conf.rxmode.offloads;
/* setup rx queues */
if ((numa_support) &&
(rxring_numa[pi] != NUMA_NO_CONFIG)) {
}
diag = rte_eth_rx_queue_setup(pi, qi,
- port->nb_rx_desc[pi],
+ port->nb_rx_desc[qi],
rxring_numa[pi],
&(port->rx_conf[qi]),
mp);
return -1;
}
diag = rte_eth_rx_queue_setup(pi, qi,
- port->nb_rx_desc[pi],
+ port->nb_rx_desc[qi],
port->socket_id,
&(port->rx_conf[qi]),
mp);
return -1;
}
}
-
+ configure_rxtx_dump_callbacks(verbose_level);
/* start port */
if (rte_eth_dev_start(pi) < 0) {
printf("Fail to start port %d\n", pi);
need_check_link_status = 1;
}
- for (event_type = RTE_ETH_EVENT_UNKNOWN;
- event_type < RTE_ETH_EVENT_MAX;
- event_type++) {
- diag = rte_eth_dev_callback_register(RTE_ETH_ALL,
- event_type,
- eth_event_callback,
- NULL);
- if (diag) {
- printf("Failed to setup even callback for event %d\n",
- event_type);
- return -1;
- }
- }
-
if (need_check_link_status == 1 && !no_link_check)
check_all_ports_link_status(RTE_PORT_ALL);
else if (need_check_link_status == 0)
printf("Done\n");
}
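+/* compact a port id array in place, dropping ids that are no longer valid */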
+static void
+remove_invalid_ports_in(portid_t *array, portid_t *total)
+{
+ portid_t i;
+ portid_t new_total = 0;
+
+ for (i = 0; i < *total; i++)
+ if (!port_id_is_invalid(array[i], DISABLED_WARN)) {
+ array[new_total] = array[i];
+ new_total++;
+ }
+ *total = new_total;
+}
+
+static void
+remove_invalid_ports(void)
+{
+ remove_invalid_ports_in(ports_ids, &nb_ports);
+ remove_invalid_ports_in(fwd_ports_ids, &nb_fwd_ports);
+ nb_cfg_ports = nb_fwd_ports;
+}
+
void
close_port(portid_t pid)
{
port_flow_flush(pi);
rte_eth_dev_close(pi);
+ remove_invalid_ports();
+
if (rte_atomic16_cmpset(&(port->port_status),
RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
printf("Port %d cannot be set to closed\n", pi);
printf("Done\n");
}
-static int
-eth_dev_event_callback_register(void)
-{
- int ret;
-
- /* register the device event callback */
- ret = rte_dev_event_callback_register(NULL,
- eth_dev_event_callback, NULL);
- if (ret) {
- printf("Failed to register device event callback\n");
- return -1;
- }
-
- return 0;
-}
-
-
-static int
-eth_dev_event_callback_unregister(void)
-{
- int ret;
-
- /* unregister the device event callback */
- ret = rte_dev_event_callback_unregister(NULL,
- eth_dev_event_callback, NULL);
- if (ret < 0) {
- printf("Failed to unregister device event callback\n");
- return -1;
- }
-
- return 0;
-}
-
void
attach_port(char *identifier)
{
- portid_t pi = 0;
- unsigned int socket_id;
+ portid_t pi;
+ struct rte_dev_iterator iterator;
printf("Attaching a new port...\n");
return;
}
- if (rte_eth_dev_attach(identifier, &pi))
+ if (rte_dev_probe(identifier) != 0) {
+ TESTPMD_LOG(ERR, "Failed to attach port %s\n", identifier);
return;
+ }
+
+ /* first attach mode: event */
+ if (setup_on_probe_event) {
+ /* new ports are detected on RTE_ETH_EVENT_NEW event */
+ for (pi = 0; pi < RTE_MAX_ETHPORTS; pi++)
+ if (ports[pi].port_status == RTE_PORT_HANDLING &&
+ ports[pi].need_setup != 0)
+ setup_attached_port(pi);
+ return;
+ }
+
+ /* second attach mode: iterator */
+ RTE_ETH_FOREACH_MATCHING_DEV(pi, identifier, &iterator) {
+ /* setup ports matching the devargs used for probing */
+ if (port_is_forwarding(pi))
+ continue; /* port was already attached before */
+ setup_attached_port(pi);
+ }
+}
+
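+/* finish setup of a newly attached port and add it to the forwarding set */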
+static void
+setup_attached_port(portid_t pi)
+{
+ unsigned int socket_id;
socket_id = (unsigned)rte_eth_dev_socket_id(pi);
- /* if socket_id is invalid, set to 0 */
+ /* if socket_id is invalid, set to the first available socket. */
if (check_socket_id(socket_id) < 0)
- socket_id = 0;
+ socket_id = socket_ids[0];
reconfig(pi, socket_id);
rte_eth_promiscuous_enable(pi);
- nb_ports = rte_eth_dev_count_avail();
-
+ ports_ids[nb_ports++] = pi;
+ fwd_ports_ids[nb_fwd_ports++] = pi;
+ nb_cfg_ports = nb_fwd_ports;
+ ports[pi].need_setup = 0;
ports[pi].port_status = RTE_PORT_STOPPED;
printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
}
void
-detach_port(portid_t port_id)
+detach_port_device(portid_t port_id)
{
- char name[RTE_ETH_NAME_MAX_LEN];
+ struct rte_device *dev;
+ portid_t sibling;
- printf("Detaching a port...\n");
+ printf("Removing a device...\n");
- if (!port_is_closed(port_id)) {
- printf("Please close port first\n");
+ dev = rte_eth_devices[port_id].device;
+ if (dev == NULL) {
+ printf("Device already removed\n");
return;
}
- if (ports[port_id].flow_list)
- port_flow_flush(port_id);
+ if (ports[port_id].port_status != RTE_PORT_CLOSED) {
+ if (ports[port_id].port_status != RTE_PORT_STOPPED) {
+ printf("Port not stopped\n");
+ return;
+ }
+ printf("Port was not closed\n");
+ if (ports[port_id].flow_list)
+ port_flow_flush(port_id);
+ }
- if (rte_eth_dev_detach(port_id, name)) {
- TESTPMD_LOG(ERR, "Failed to detach port '%s'\n", name);
+ if (rte_dev_remove(dev) != 0) {
+ TESTPMD_LOG(ERR, "Failed to detach device %s\n", dev->name);
return;
}
+ RTE_ETH_FOREACH_DEV_OF(sibling, dev) {
+ /* reset mapping between old ports and removed device */
+ rte_eth_devices[sibling].device = NULL;
+ if (ports[sibling].port_status != RTE_PORT_CLOSED) {
+ /* sibling ports are forced to be closed */
+ ports[sibling].port_status = RTE_PORT_CLOSED;
+ printf("Port %u is closed\n", sibling);
+ }
+ }
- nb_ports = rte_eth_dev_count_avail();
+ remove_invalid_ports();
- printf("Port '%s' is detached. Now total ports is %d\n",
- name, nb_ports);
+ printf("Device of port %u is detached\n", port_id);
+ printf("Now total ports is %d\n", nb_ports);
printf("Done\n");
return;
}
{
portid_t pt_id;
int ret;
+ int i;
if (test_done == 0)
stop_packet_forwarding();
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i]) {
+ if (mp_alloc_type == MP_ALLOC_ANON)
+ rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
+ NULL);
+ }
+ }
if (ports != NULL) {
no_link_check = 1;
RTE_ETH_FOREACH_DEV(pt_id) {
- printf("\nShutting down port %d...\n", pt_id);
+ printf("\nStopping port %d...\n", pt_id);
fflush(stdout);
stop_port(pt_id);
+ }
+ RTE_ETH_FOREACH_DEV(pt_id) {
+ printf("\nShutting down port %d...\n", pt_id);
+ fflush(stdout);
close_port(pt_id);
}
}
if (hot_plug) {
ret = rte_dev_event_monitor_stop();
- if (ret)
+ if (ret) {
RTE_LOG(ERR, EAL,
"fail to stop device event monitor.");
+ return;
+ }
- ret = eth_dev_event_callback_unregister();
- if (ret)
+ ret = rte_dev_event_callback_unregister(NULL,
+ dev_event_callback, NULL);
+ if (ret < 0) {
+ RTE_LOG(ERR, EAL,
+ "fail to unregister device event callback.\n");
+ return;
+ }
+
+ ret = rte_dev_hotplug_handle_disable();
+ if (ret) {
RTE_LOG(ERR, EAL,
- "fail to unregister all event callbacks.");
+ "fail to disable hotplug handling.\n");
+ return;
+ }
+ }
+ for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+ if (mempools[i])
+ rte_mempool_free(mempools[i]);
}
printf("\nBye...\n");
}
}
+/*
+ * This callback removes a port of a device. It is limited in that it cannot
+ * handle removal of multiple ports from the same device.
+ * TODO: the device detach invocation is planned to move from the user side
+ * into EAL, and all PMDs should be converted to free port resources on
+ * ethdev close.
+ */
static void
-rmv_event_callback(void *arg)
+rmv_port_callback(void *arg)
{
- struct rte_eth_dev *dev;
+ int need_to_start = 0;
+ int org_no_link_check = no_link_check;
portid_t port_id = (intptr_t)arg;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
- dev = &rte_eth_devices[port_id];
+ if (!test_done && port_is_forwarding(port_id)) {
+ need_to_start = 1;
+ stop_packet_forwarding();
+ }
+ no_link_check = 1;
stop_port(port_id);
+ no_link_check = org_no_link_check;
close_port(port_id);
- printf("removing device %s\n", dev->device->name);
- if (rte_eal_dev_detach(dev->device))
- TESTPMD_LOG(ERR, "Failed to detach device %s\n",
- dev->device->name);
+ detach_port_device(port_id);
+ if (need_to_start)
+ start_packet_forwarding(0);
}
/* This function is used by the interrupt thread */
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
- static const char * const event_desc[] = {
- [RTE_ETH_EVENT_UNKNOWN] = "Unknown",
- [RTE_ETH_EVENT_INTR_LSC] = "LSC",
- [RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
- [RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
- [RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
- [RTE_ETH_EVENT_IPSEC] = "IPsec",
- [RTE_ETH_EVENT_MACSEC] = "MACsec",
- [RTE_ETH_EVENT_INTR_RMV] = "device removal",
- [RTE_ETH_EVENT_NEW] = "device probed",
- [RTE_ETH_EVENT_DESTROY] = "device released",
- [RTE_ETH_EVENT_MAX] = NULL,
- };
-
RTE_SET_USED(param);
RTE_SET_USED(ret_param);
if (type >= RTE_ETH_EVENT_MAX) {
- fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
+ fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
port_id, __func__, type);
fflush(stderr);
} else if (event_print_mask & (UINT32_C(1) << type)) {
- printf("\nPort %" PRIu8 ": %s event\n", port_id,
- event_desc[type]);
+ printf("\nPort %" PRIu16 ": %s event\n", port_id,
+ eth_event_desc[type]);
fflush(stdout);
}
- if (port_id_is_invalid(port_id, DISABLED_WARN))
- return 0;
-
switch (type) {
+ case RTE_ETH_EVENT_NEW:
+ ports[port_id].need_setup = 1;
+ ports[port_id].port_status = RTE_PORT_HANDLING;
+ break;
case RTE_ETH_EVENT_INTR_RMV:
+ if (port_id_is_invalid(port_id, DISABLED_WARN))
+ break;
if (rte_eal_alarm_set(100000,
- rmv_event_callback, (void *)(intptr_t)port_id))
+ rmv_port_callback, (void *)(intptr_t)port_id))
fprintf(stderr, "Could not set up deferred device removal\n");
break;
default:
return 0;
}
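+/* register eth_event_callback for all ethdev event types on all ports */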
+static int
+register_eth_event_callback(void)
+{
+ int ret;
+ enum rte_eth_event_type event;
+
+ for (event = RTE_ETH_EVENT_UNKNOWN;
+ event < RTE_ETH_EVENT_MAX; event++) {
+ ret = rte_eth_dev_callback_register(RTE_ETH_ALL,
+ event,
+ eth_event_callback,
+ NULL);
+ if (ret != 0) {
+ TESTPMD_LOG(ERR, "Failed to register callback for "
+ "%s event\n", eth_event_desc[event]);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
/* This function is used by the interrupt thread */
static void
-eth_dev_event_callback(char *device_name, enum rte_dev_event_type type,
+dev_event_callback(const char *device_name, enum rte_dev_event_type type,
__rte_unused void *arg)
{
+ uint16_t port_id;
+ int ret;
+
if (type >= RTE_DEV_EVENT_MAX) {
fprintf(stderr, "%s called upon invalid event %d\n",
__func__, type);
switch (type) {
case RTE_DEV_EVENT_REMOVE:
- RTE_LOG(ERR, EAL, "The device: %s has been removed!\n",
+ RTE_LOG(DEBUG, EAL, "The device: %s has been removed!\n",
device_name);
- /* TODO: After finish failure handle, begin to stop
- * packet forward, stop port, close port, detach port.
+ ret = rte_eth_dev_get_port_by_name(device_name, &port_id);
+ if (ret) {
+ RTE_LOG(ERR, EAL, "can not get port by device %s!\n",
+ device_name);
+ return;
+ }
+ /*
+		 * Because the user's callback is invoked from the EAL
+		 * interrupt callback, that interrupt callback must finish
+		 * before it can be unregistered when detaching the device.
+		 * So the callback returns quickly and the actual detach is
+		 * done by a deferred removal. This is a workaround; once
+		 * device detaching is moved into EAL, the deferred removal
+		 * can be deleted.
*/
+ if (rte_eal_alarm_set(100000,
+ rmv_port_callback, (void *)(intptr_t)port_id))
+ RTE_LOG(ERR, EAL,
+ "Could not set up deferred device removal\n");
break;
case RTE_DEV_EVENT_ADD:
RTE_LOG(ERR, EAL, "The device: %s has been added!\n",
rxtx_port_config(struct rte_port *port)
{
uint16_t qid;
+ uint64_t offloads;
for (qid = 0; qid < nb_rxq; qid++) {
+ offloads = port->rx_conf[qid].offloads;
port->rx_conf[qid] = port->dev_info.default_rxconf;
+ port->rx_conf[qid].offloads |= offloads;
/* Check if any Rx parameters have been passed */
if (rx_pthresh != RTE_PMD_PARAM_UNSET)
}
for (qid = 0; qid < nb_txq; qid++) {
+ offloads = port->tx_conf[qid].offloads;
port->tx_conf[qid] = port->dev_info.default_txconf;
+ port->tx_conf[qid].offloads |= offloads;
/* Check if any Tx parameters have been passed */
if (tx_pthresh != RTE_PMD_PARAM_UNSET)
{
portid_t pid;
struct rte_port *port;
- struct rte_eth_dev_info dev_info;
RTE_ETH_FOREACH_DEV(pid) {
port = &ports[pid];
port->dev_conf.fdir_conf = fdir_conf;
+ rte_eth_dev_info_get(pid, &port->dev_info);
if (nb_rxq > 1) {
- rte_eth_dev_info_get(pid, &dev_info);
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf =
- rss_hf & dev_info.flow_type_rss_offloads;
+ rss_hf & port->dev_info.flow_type_rss_offloads;
} else {
port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
(rte_eth_devices[pid].data->dev_flags &
RTE_ETH_DEV_INTR_RMV))
port->dev_conf.intr_conf.rmv = 1;
-
-#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
- /* Detect softnic port */
- if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
- port->softnic_enable = 1;
- memset(&port->softport, 0, sizeof(struct softnic_port));
-
- if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
- port->softport.tm_flag = 1;
- }
-#endif
}
}
struct rte_port *port;
port = &ports[slave_pid];
- return port->slave_flag;
+ if ((rte_eth_devices[slave_pid].data->dev_flags &
+ RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+ return 1;
+ return 0;
}
const uint16_t vlan_tags[] = {
};
static int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
enum dcb_mode_enable dcb_mode,
enum rte_eth_nb_tcs num_tcs,
uint8_t pfc_en)
{
uint8_t i;
+ int32_t rc;
+ struct rte_eth_rss_conf rss_conf;
/*
* Builds up the correct configuration for dcb+vt based on the vlan tags array
struct rte_eth_dcb_tx_conf *tx_conf =
			&eth_conf->tx_adv_conf.dcb_tx_conf;
+ rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
+ if (rc != 0)
+ return rc;
+
rx_conf->nb_tcs = num_tcs;
tx_conf->nb_tcs = num_tcs;
rx_conf->dcb_tc[i] = i % num_tcs;
tx_conf->dcb_tc[i] = i % num_tcs;
}
+
eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
- eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
+ eth_conf->rx_adv_conf.rss_conf = rss_conf;
eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
}
port_conf.txmode = rte_port->dev_conf.txmode;
/*set configuration of DCB in vt mode and DCB in non-vt mode*/
- retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+ retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
if (retval < 0)
return retval;
port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
- /**
- * Write the configuration into the device.
- * Set the numbers of RX & TX queues to 0, so
- * the RX & TX queues will not be setup.
- */
- rte_eth_dev_configure(pid, 0, 0, &port_conf);
-
+	/* re-configure the device. */
+ retval = rte_eth_dev_configure(pid, nb_rxq, nb_rxq, &port_conf);
+ if (retval < 0)
+ return retval;
rte_eth_dev_info_get(pid, &rte_port->dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,
"rte_zmalloc(%d struct rte_port) failed\n",
RTE_MAX_ETHPORTS);
}
+
+ /* Initialize ports NUMA structures */
+ memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
+ memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
static void
printf("\nPort statistics ====================================");
for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
nic_stats_display(fwd_ports_ids[i]);
+
+ fflush(stdout);
}
static void
{
int diag;
portid_t port_id;
+ uint16_t count;
int ret;
signal(SIGINT, signal_handler);
rte_panic("Cannot register log type");
rte_log_set_level(testpmd_logtype, RTE_LOG_DEBUG);
+ ret = register_eth_event_callback();
+ if (ret != 0)
+ rte_panic("Cannot register for ethdev events");
+
+#ifdef RTE_LIBRTE_PDUMP
+ /* initialize packet capture framework */
+ rte_pdump_init();
+#endif
+
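+	/* record the ids of all probed Ethernet ports */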
+ count = 0;
+ RTE_ETH_FOREACH_DEV(port_id) {
+ ports_ids[count] = port_id;
+ count++;
+ }
+ nb_ports = (portid_t) count;
+ if (nb_ports == 0)
+ TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
+
+ /* allocate port structures, and init them */
+ init_port();
+
+ set_def_fwd_config();
+ if (nb_lcores == 0)
+ rte_panic("Empty set of forwarding logical cores - check the "
+ "core mask supplied in the command parameters\n");
+
/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
bitrate_enabled = 0;
latencystats_enabled = 0;
#endif
+ /* on FreeBSD, mlockall() is disabled by default */
+#ifdef RTE_EXEC_ENV_FREEBSD
+ do_mlockall = 0;
+#else
+ do_mlockall = 1;
+#endif
+
argc -= diag;
argv += diag;
if (argc > 1)
strerror(errno));
}
-#ifdef RTE_LIBRTE_PDUMP
- /* initialize packet capture framework */
- rte_pdump_init(NULL);
-#endif
-
- nb_ports = (portid_t) rte_eth_dev_count_avail();
- if (nb_ports == 0)
- TESTPMD_LOG(WARNING, "No probed ethernet devices\n");
-
- /* allocate port structures, and init them */
- init_port();
-
- set_def_fwd_config();
- if (nb_lcores == 0)
- rte_panic("Empty set of forwarding logical cores - check the "
- "core mask supplied in the command parameters\n");
-
if (tx_first && interactive)
rte_exit(EXIT_FAILURE, "--tx-first cannot be used on "
"interactive mode.\n");
init_config();
if (hot_plug) {
- /* enable hot plug monitoring */
+ ret = rte_dev_hotplug_handle_enable();
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "fail to enable hotplug handling.");
+ return -1;
+ }
+
ret = rte_dev_event_monitor_start();
if (ret) {
- rte_errno = EINVAL;
+ RTE_LOG(ERR, EAL,
+ "fail to start device event monitoring.");
return -1;
}
- eth_dev_event_callback_register();
+ ret = rte_dev_event_callback_register(NULL,
+ dev_event_callback, NULL);
+ if (ret) {
+ RTE_LOG(ERR, EAL,
+ "fail to register device event callback\n");
+ return -1;
+ }
}
if (start_port(RTE_PORT_ALL) != 0)