X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_pipeline_common.c;h=fa91bf22905289d742f3b9831b6e957f3180d09e;hb=fc6eb30769547d3840996bac8b9d7f30d4e05511;hp=63f54daefce37f08a68aafc6d511679d76b1356b;hpb=385cefd91ee207a89fb2005dcf0a6761fe241ecf;p=dpdk.git diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c index 63f54daefc..fa91bf2290 100644 --- a/app/test-eventdev/test_pipeline_common.c +++ b/app/test-eventdev/test_pipeline_common.c @@ -13,12 +13,12 @@ pipeline_test_result(struct evt_test *test, struct evt_options *opt) uint64_t total = 0; struct test_pipeline *t = evt_test_priv(test); - printf("Packet distribution across worker cores :\n"); + evt_info("Packet distribution across worker cores :"); for (i = 0; i < t->nb_workers; i++) total += t->worker[i].processed_pkts; for (i = 0; i < t->nb_workers; i++) - printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:" - CLGRN" %3.2f\n"CLNRM, i, + evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:" + CLGRN" %3.2f"CLNRM, i, t->worker[i].processed_pkts, (((double)t->worker[i].processed_pkts)/total) * 100); @@ -38,6 +38,70 @@ pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues) evt_dump_producer_type(opt); } +static inline uint64_t +processed_pkts(struct test_pipeline *t) +{ + uint8_t i; + uint64_t total = 0; + + rte_smp_rmb(); + for (i = 0; i < t->nb_workers; i++) + total += t->worker[i].processed_pkts; + + return total; +} + +int +pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt, + int (*worker)(void *)) +{ + int ret, lcore_id; + struct test_pipeline *t = evt_test_priv(test); + + int port_idx = 0; + /* launch workers */ + RTE_LCORE_FOREACH_SLAVE(lcore_id) { + if (!(opt->wlcores[lcore_id])) + continue; + + ret = rte_eal_remote_launch(worker, + &t->worker[port_idx], lcore_id); + if (ret) { + evt_err("failed to launch worker %d", lcore_id); + return ret; + } + port_idx++; + } + + uint64_t perf_cycles = rte_get_timer_cycles(); + const uint64_t perf_sample = rte_get_timer_hz(); + + static float total_mpps; + static uint64_t samples; + + uint64_t prev_pkts = 0; + + while (t->done == false) { + const uint64_t new_cycles = rte_get_timer_cycles(); + + if ((new_cycles - perf_cycles) > perf_sample) { + const uint64_t curr_pkts = processed_pkts(t); + + float mpps = (float)(curr_pkts - prev_pkts)/1000000; + + prev_pkts = curr_pkts; + perf_cycles = new_cycles; + total_mpps += mpps; + ++samples; + printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM, + mpps, total_mpps/samples); + fflush(stdout); + } + } + printf("\n"); + return 0; +} + int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) { @@ -47,7 +111,7 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) */ lcores = 2; - if (!rte_eth_dev_count()) { + if (!rte_eth_dev_count_avail()) { evt_err("test needs minimum 1 ethernet dev"); return -1; } @@ -94,17 +158,14 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues) int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; + int ret; uint8_t nb_queues = 1; - uint8_t mt_state = 0; struct test_pipeline *t = evt_test_priv(test); struct rte_eth_rxconf rx_conf; struct rte_eth_conf port_conf = { .rxmode = { .mq_mode = ETH_MQ_RX_RSS, - .max_rx_pkt_len = ETHER_MAX_LEN, - .offloads = DEV_RX_OFFLOAD_CRC_STRIP, - .ignore_offload_bitfield = 1, }, .rx_adv_conf = { .rss_conf = { @@ -114,44 +175,83 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt) }, 
}; - RTE_SET_USED(opt); - if (!rte_eth_dev_count()) { - evt_err("No ethernet ports found.\n"); + if (!rte_eth_dev_count_avail()) { + evt_err("No ethernet ports found."); return -ENODEV; } - for (i = 0; i < rte_eth_dev_count(); i++) { + if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) { + evt_err("max_pkt_sz can not be less than %d", + RTE_ETHER_MIN_LEN); + return -EINVAL; + } + + port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz; + if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN) + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + + t->internal_port = 1; + RTE_ETH_FOREACH_DEV(i) { struct rte_eth_dev_info dev_info; + struct rte_eth_conf local_port_conf = port_conf; + uint32_t caps = 0; + + ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps); + if (ret != 0) { + evt_err("failed to get event tx adapter[%d] caps", i); + return ret; + } + + if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) + t->internal_port = 0; + + ret = rte_eth_dev_info_get(i, &dev_info); + if (ret != 0) { + evt_err("Error during getting device (port %u) info: %s\n", + i, strerror(-ret)); + return ret; + } - memset(&dev_info, 0, sizeof(struct rte_eth_dev_info)); - rte_eth_dev_info_get(i, &dev_info); - mt_state = !(dev_info.tx_offload_capa & - DEV_TX_OFFLOAD_MT_LOCKFREE); rx_conf = dev_info.default_rxconf; rx_conf.offloads = port_conf.rxmode.offloads; + local_port_conf.rx_adv_conf.rss_conf.rss_hf &= + dev_info.flow_type_rss_offloads; + if (local_port_conf.rx_adv_conf.rss_conf.rss_hf != + port_conf.rx_adv_conf.rss_conf.rss_hf) { + evt_info("Port %u modified RSS hash function based on hardware support," + "requested:%#"PRIx64" configured:%#"PRIx64"", + i, + port_conf.rx_adv_conf.rss_conf.rss_hf, + local_port_conf.rx_adv_conf.rss_conf.rss_hf); + } + if (rte_eth_dev_configure(i, nb_queues, nb_queues, - &port_conf) + &local_port_conf) < 0) { - evt_err("Failed to configure eth port [%d]\n", i); + evt_err("Failed to configure eth port [%d]", i); return -EINVAL; } if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC, rte_socket_id(), &rx_conf, t->pool) < 0) { - evt_err("Failed to setup eth port [%d] rx_queue: %d.\n", + evt_err("Failed to setup eth port [%d] rx_queue: %d.", i, 0); return -EINVAL; } if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC, rte_socket_id(), NULL) < 0) { - evt_err("Failed to setup eth port [%d] tx_queue: %d.\n", + evt_err("Failed to setup eth port [%d] tx_queue: %d.", i, 0); return -EINVAL; } - t->mt_unsafe |= mt_state; - rte_eth_promiscuous_enable(i); + ret = rte_eth_promiscuous_enable(i); + if (ret != 0) { + evt_err("Failed to enable promiscuous mode for eth port [%d]: %s", + i, rte_strerror(-ret)); + return ret; + } } return 0; @@ -162,7 +262,6 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt, uint8_t *queue_arr, uint8_t nb_queues, const struct rte_event_port_conf p_conf) { - int i; int ret; uint8_t port; struct test_pipeline *t = evt_test_priv(test); @@ -183,23 +282,15 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt, return ret; } - if (queue_arr == NULL) { - if (rte_event_port_link(opt->dev_id, port, NULL, NULL, - 0) != nb_queues) - goto link_fail; - } else { - for (i = 0; i < nb_queues; i++) { - if (rte_event_port_link(opt->dev_id, port, - &queue_arr[i], NULL, 1) != 1) - goto link_fail; - } - } + if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL, + nb_queues) != nb_queues) + goto link_fail; } return 0; link_fail: - evt_err("failed to link all queues to port %d", port); + evt_err("failed to link queues to port %d", port); return -EINVAL; } @@ 
-214,7 +305,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, memset(&queue_conf, 0, sizeof(struct rte_event_eth_rx_adapter_queue_conf)); queue_conf.ev.sched_type = opt->sched_type_list[0]; - for (prod = 0; prod < rte_eth_dev_count(); prod++) { + RTE_ETH_FOREACH_DEV(prod) { uint32_t cap; ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, @@ -240,32 +331,72 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, } if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) { - uint32_t service_id; + uint32_t service_id = -1U; rte_event_eth_rx_adapter_service_id_get(prod, &service_id); ret = evt_service_setup(service_id); if (ret) { evt_err("Failed to setup service core" - " for Rx adapter\n"); + " for Rx adapter"); return ret; } } - ret = rte_eth_dev_start(prod); + evt_info("Port[%d] using Rx adapter[%d] configured", prod, + prod); + } + + return ret; +} + +int +pipeline_event_tx_adapter_setup(struct evt_options *opt, + struct rte_event_port_conf port_conf) +{ + int ret = 0; + uint16_t consm; + + RTE_ETH_FOREACH_DEV(consm) { + uint32_t cap; + + ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, + consm, &cap); + if (ret) { + evt_err("failed to get event tx adapter[%d] caps", + consm); + return ret; + } + + ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id, + &port_conf); if (ret) { - evt_err("Ethernet dev [%d] failed to start." - " Using synthetic producer", prod); + evt_err("failed to create tx adapter[%d]", consm); return ret; } - ret = rte_event_eth_rx_adapter_start(prod); + ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1); if (ret) { - evt_err("Rx adapter[%d] start failed", prod); + evt_err("failed to add tx queues to adapter[%d]", + consm); return ret; } - printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__, - prod, prod); + + if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) { + uint32_t service_id = -1U; + + rte_event_eth_tx_adapter_service_id_get(consm, + &service_id); + ret = evt_service_setup(service_id); + if (ret) { + evt_err("Failed to setup service core" + " for Tx adapter\n"); + return ret; + } + } + + evt_info("Port[%d] using Tx adapter[%d] Configured", consm, + consm); } return ret; @@ -274,14 +405,14 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride, void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt) { - int i; + uint16_t i; RTE_SET_USED(test); RTE_SET_USED(opt); - for (i = 0; i < rte_eth_dev_count(); i++) { + RTE_ETH_FOREACH_DEV(i) { rte_event_eth_rx_adapter_stop(i); + rte_event_eth_tx_adapter_stop(i); rte_eth_dev_stop(i); - rte_eth_dev_close(i); } } @@ -298,12 +429,42 @@ int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt) { struct test_pipeline *t = evt_test_priv(test); + int i, ret; + + if (!opt->mbuf_sz) + opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE; + + if (!opt->max_pkt_sz) + opt->max_pkt_sz = RTE_ETHER_MAX_LEN; + + RTE_ETH_FOREACH_DEV(i) { + struct rte_eth_dev_info dev_info; + uint16_t data_size = 0; + + memset(&dev_info, 0, sizeof(dev_info)); + ret = rte_eth_dev_info_get(i, &dev_info); + if (ret != 0) { + evt_err("Error during getting device (port %u) info: %s\n", + i, strerror(-ret)); + return ret; + } + + if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX && + dev_info.rx_desc_lim.nb_mtu_seg_max != 0) { + data_size = opt->max_pkt_sz / + dev_info.rx_desc_lim.nb_mtu_seg_max; + data_size += RTE_PKTMBUF_HEADROOM; + + if (data_size > opt->mbuf_sz) + opt->mbuf_sz = data_size; + } + } t->pool = 
rte_pktmbuf_pool_create(test->name, /* mempool name */ opt->pool_sz, /* number of elements*/ 512, /* cache size*/ 0, - RTE_MBUF_DEFAULT_BUF_SIZE, + opt->mbuf_sz, opt->socket_id); /* flags */ if (t->pool == NULL) {
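
The mbuf sizing added to pipeline_mempool_setup() above comes down to this: if a port limits how many segments a single received packet may span (rx_desc_lim.nb_mtu_seg_max), each mbuf data room must hold at least max_pkt_sz / nb_mtu_seg_max bytes plus RTE_PKTMBUF_HEADROOM, and opt->mbuf_sz is raised only when that exceeds its current value. A minimal standalone sketch of the arithmetic follows; the helper name and the example port limit are illustrative and not part of the patch, and the constants assume the usual DPDK defaults of 128 B headroom and a 2176 B default buffer size.

/* Sketch of the per-segment mbuf sizing rule used in the patch above. */
#include <stdint.h>
#include <stdio.h>

#define PKTMBUF_HEADROOM  128   /* typical RTE_PKTMBUF_HEADROOM */
#define MBUF_DEFAULT_SZ   2176  /* typical RTE_MBUF_DEFAULT_BUF_SIZE */

/* Hypothetical helper: smallest acceptable mbuf size for a given port. */
static uint16_t
min_mbuf_sz(uint16_t max_pkt_sz, uint16_t nb_mtu_seg_max, uint16_t mbuf_sz)
{
	uint16_t data_size;

	/* No per-packet segment limit reported: keep the current size. */
	if (nb_mtu_seg_max == 0 || nb_mtu_seg_max == UINT16_MAX)
		return mbuf_sz;

	/* Payload that must fit in one segment, plus headroom. */
	data_size = max_pkt_sz / nb_mtu_seg_max;
	data_size += PKTMBUF_HEADROOM;

	return data_size > mbuf_sz ? data_size : mbuf_sz;
}

int
main(void)
{
	/*
	 * e.g. 9600 B jumbo frames on a port that chains at most 2 segments:
	 * 9600 / 2 + 128 = 4928 > 2176, so the pool element size is raised.
	 */
	printf("mbuf size: %u\n", min_mbuf_sz(9600, 2, MBUF_DEFAULT_SZ));
	return 0;
}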