diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 63f54daefc..ddaa9f3fdb 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -13,12 +13,12 @@ pipeline_test_result(struct evt_test *test, struct evt_options *opt)
 	uint64_t total = 0;
 	struct test_pipeline *t = evt_test_priv(test);
 
-	printf("Packet distribution across worker cores :\n");
+	evt_info("Packet distribution across worker cores :");
 	for (i = 0; i < t->nb_workers; i++)
 		total += t->worker[i].processed_pkts;
 	for (i = 0; i < t->nb_workers; i++)
-		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
-				CLGRN" %3.2f\n"CLNRM, i,
+		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
+				CLGRN" %3.2f"CLNRM, i,
 				t->worker[i].processed_pkts,
 				(((double)t->worker[i].processed_pkts)/total)
 				* 100);
@@ -36,18 +36,93 @@ pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
 	evt_dump_queue_priority(opt);
 	evt_dump_sched_type_list(opt);
 	evt_dump_producer_type(opt);
+	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
+	evt_dump("event_vector", "%d", opt->ena_vector);
+	if (opt->ena_vector) {
+		evt_dump("vector_size", "%d", opt->vector_size);
+		evt_dump("vector_tmo_ns", "%" PRIu64 "", opt->vector_tmo_nsec);
+	}
+}
+
+static inline uint64_t
+processed_pkts(struct test_pipeline *t)
+{
+	uint8_t i;
+	uint64_t total = 0;
+
+	for (i = 0; i < t->nb_workers; i++)
+		total += t->worker[i].processed_pkts;
+
+	return total;
+}
+
+int
+pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
+		int (*worker)(void *))
+{
+	int ret, lcore_id;
+	struct test_pipeline *t = evt_test_priv(test);
+
+	int port_idx = 0;
+	/* launch workers */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (!(opt->wlcores[lcore_id]))
+			continue;
+
+		ret = rte_eal_remote_launch(worker,
+				&t->worker[port_idx], lcore_id);
+		if (ret) {
+			evt_err("failed to launch worker %d", lcore_id);
+			return ret;
+		}
+		port_idx++;
+	}
+
+	uint64_t perf_cycles = rte_get_timer_cycles();
+	const uint64_t perf_sample = rte_get_timer_hz();
+
+	static float total_mpps;
+	static uint64_t samples;
+
+	uint64_t prev_pkts = 0;
+
+	while (t->done == false) {
+		const uint64_t new_cycles = rte_get_timer_cycles();
+
+		if ((new_cycles - perf_cycles) > perf_sample) {
+			const uint64_t curr_pkts = processed_pkts(t);
+
+			float mpps = (float)(curr_pkts - prev_pkts)/1000000;
+
+			prev_pkts = curr_pkts;
+			perf_cycles = new_cycles;
+			total_mpps += mpps;
+			++samples;
+			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+					mpps, total_mpps/samples);
+			fflush(stdout);
+		}
+	}
+	printf("\n");
+	return 0;
 }
 
 int
 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
 	unsigned int lcores;
-	/*
-	 * N worker + 1 master
-	 */
+
+	/* N worker + main */
 	lcores = 2;
 
-	if (!rte_eth_dev_count()) {
+	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		evt_err("Invalid producer type '%s' valid producer '%s'",
+			evt_prod_id_to_name(opt->prod_type),
+			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
+		return -1;
+	}
+
+	if (!rte_eth_dev_count_avail()) {
 		evt_err("test needs minimum 1 ethernet dev");
 		return -1;
 	}
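The pipeline_launch_lcores() added above has the main lcore sample the aggregate worker counter once per second: one second is expressed as rte_get_timer_hz() timer cycles, and the per-sample delta of the packet count is scaled to millions of packets per second. A condensed, self-contained sketch of that sampling pattern; get_pkt_count() is a hypothetical stand-in for processed_pkts(t):

#include <stdio.h>
#include <stdint.h>

#include <rte_cycles.h>

/* Hypothetical stand-in for processed_pkts(t) in the hunk above. */
extern uint64_t get_pkt_count(void);

static void
sample_mpps(volatile int *done)
{
	uint64_t prev_cycles = rte_get_timer_cycles();
	const uint64_t hz = rte_get_timer_hz(); /* timer cycles per second */
	uint64_t prev_pkts = get_pkt_count();

	while (!*done) {
		const uint64_t now = rte_get_timer_cycles();

		if (now - prev_cycles > hz) { /* roughly one second elapsed */
			const uint64_t pkts = get_pkt_count();

			/* packets per second, scaled to millions (mpps) */
			printf("\r%.3f mpps", (double)(pkts - prev_pkts) / 1e6);
			fflush(stdout);
			prev_pkts = pkts;
			prev_cycles = now;
		}
	}
	printf("\n");
}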
@@ -58,8 +133,8 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 	}
 
 	/* Validate worker lcores */
-	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-		evt_err("worker lcores overlaps with master lcore");
+	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+		evt_err("worker lcores overlaps with main lcore");
 		return -1;
 	}
 	if (evt_has_disabled_lcore(opt->wlcores)) {
@@ -94,64 +169,118 @@ int
 pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
-	int i;
+	uint16_t i, j;
+	int ret;
 	uint8_t nb_queues = 1;
-	uint8_t mt_state = 0;
 	struct test_pipeline *t = evt_test_priv(test);
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
-			.max_rx_pkt_len = ETHER_MAX_LEN,
-			.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
-			.ignore_offload_bitfield = 1,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
 
-	RTE_SET_USED(opt);
-	if (!rte_eth_dev_count()) {
-		evt_err("No ethernet ports found.\n");
+	if (!rte_eth_dev_count_avail()) {
+		evt_err("No ethernet ports found.");
 		return -ENODEV;
 	}
 
-	for (i = 0; i < rte_eth_dev_count(); i++) {
+	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
+		evt_err("max_pkt_sz can not be less than %d",
+			RTE_ETHER_MIN_LEN);
+		return -EINVAL;
+	}
+
+	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+
+	t->internal_port = 1;
+	RTE_ETH_FOREACH_DEV(i) {
 		struct rte_eth_dev_info dev_info;
+		struct rte_eth_conf local_port_conf = port_conf;
+		uint32_t caps = 0;
+
+		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event tx adapter[%d] caps", i);
+			return ret;
+		}
+
+		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+			t->internal_port = 0;
+
+		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event tx adapter[%d] caps", i);
+			return ret;
+		}
+
+		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+			local_port_conf.rxmode.offloads |=
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		/* Enable mbuf fast free if PMD has the capability. */
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
-		rte_eth_dev_info_get(i, &dev_info);
-		mt_state = !(dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_MT_LOCKFREE);
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
 
-		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
-					&port_conf)
-				< 0) {
-			evt_err("Failed to configure eth port [%d]\n", i);
+		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+			dev_info.flow_type_rss_offloads;
+		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+				port_conf.rx_adv_conf.rss_conf.rss_hf) {
+			evt_info("Port %u modified RSS hash function based on hardware support,"
+				"requested:%#"PRIx64" configured:%#"PRIx64"",
+				i,
+				port_conf.rx_adv_conf.rss_conf.rss_hf,
+				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+		}
+
+		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
+					&local_port_conf) < 0) {
+			evt_err("Failed to configure eth port [%d]", i);
 			return -EINVAL;
 		}
 
-		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
-				rte_socket_id(), &rx_conf, t->pool) < 0) {
-			evt_err("Failed to setup eth port [%d] rx_queue: %d.\n",
+		for (j = 0; j < opt->eth_queues; j++) {
+			if (rte_eth_rx_queue_setup(
+				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
+				    opt->per_port_pool ? t->pool[i] :
							 t->pool[0]) < 0) {
+				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
 					i, 0);
-			return -EINVAL;
+				return -EINVAL;
+			}
 		}
+
 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
 					rte_socket_id(), NULL) < 0) {
-			evt_err("Failed to setup eth port [%d] tx_queue: %d.\n",
+			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
 					i, 0);
 			return -EINVAL;
 		}
 
-		t->mt_unsafe |= mt_state;
-		rte_eth_promiscuous_enable(i);
+		ret = rte_eth_promiscuous_enable(i);
+		if (ret != 0) {
+			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+				i, rte_strerror(-ret));
+			return ret;
+		}
 	}
 
 	return 0;
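pipeline_ethdev_setup() above switches from max_rx_pkt_len to rxmode.mtu, so the desired maximum frame size must be reduced by the Ethernet header and CRC, which mtu does not count. A minimal sketch of the same arithmetic, assuming the standard 14-byte header and 4-byte CRC:

#include <rte_ether.h>

static inline uint16_t
frame_size_to_mtu(uint16_t max_frame_sz)
{
	/* e.g. 1518 (RTE_ETHER_MAX_LEN) - 14 - 4 = 1500 */
	return max_frame_sz - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN;
}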
@@ -162,7 +291,6 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
 		uint8_t *queue_arr, uint8_t nb_queues,
 		const struct rte_event_port_conf p_conf)
 {
-	int i;
 	int ret;
 	uint8_t port;
 	struct test_pipeline *t = evt_test_priv(test);
@@ -183,23 +311,15 @@ pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
 			return ret;
 		}
 
-		if (queue_arr == NULL) {
-			if (rte_event_port_link(opt->dev_id, port, NULL, NULL,
-						0) != nb_queues)
-				goto link_fail;
-		} else {
-			for (i = 0; i < nb_queues; i++) {
-				if (rte_event_port_link(opt->dev_id, port,
-						&queue_arr[i], NULL, 1) != 1)
-					goto link_fail;
-			}
-		}
+		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
+					nb_queues) != nb_queues)
+			goto link_fail;
 	}
 
 	return 0;
 
 link_fail:
-	evt_err("failed to link all queues to port %d", port);
+	evt_err("failed to link queues to port %d", port);
 	return -EINVAL;
 }
 
@@ -209,12 +329,26 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 {
 	int ret = 0;
 	uint16_t prod;
+	struct rte_mempool *vector_pool = NULL;
 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
 
 	memset(&queue_conf, 0,
 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
 	queue_conf.ev.sched_type = opt->sched_type_list[0];
-	for (prod = 0; prod < rte_eth_dev_count(); prod++) {
+	if (opt->ena_vector) {
+		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;
+
+		nb_elem = nb_elem ? nb_elem : 1;
+		vector_pool = rte_event_vector_pool_create(
+			"vector_pool", nb_elem, 0, opt->vector_size,
+			opt->socket_id);
+		if (vector_pool == NULL) {
+			evt_err("failed to create event vector pool");
+			return -ENOMEM;
+		}
+	}
+	RTE_ETH_FOREACH_DEV(prod) {
+		struct rte_event_eth_rx_adapter_vector_limits limits;
 		uint32_t cap;
 
 		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -225,6 +359,54 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 			opt->dev_id);
 			return ret;
 		}
+
+		if (opt->ena_vector) {
+			memset(&limits, 0, sizeof(limits));
+			ret = rte_event_eth_rx_adapter_vector_limits_get(
+				opt->dev_id, prod, &limits);
+			if (ret) {
+				evt_err("failed to get vector limits");
+				return ret;
+			}
+
+			if (opt->vector_size < limits.min_sz ||
+			    opt->vector_size > limits.max_sz) {
+				evt_err("Vector size [%d] not within limits max[%d] min[%d]",
+					opt->vector_size, limits.min_sz,
+					limits.max_sz);
+				return -EINVAL;
+			}
+
+			if (limits.log2_sz &&
+			    !rte_is_power_of_2(opt->vector_size)) {
+				evt_err("Vector size [%d] not power of 2",
+					opt->vector_size);
+				return -EINVAL;
+			}
+
+			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
+			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
+				evt_err("Vector timeout [%" PRIu64
+					"] not within limits max[%" PRIu64
+					"] min[%" PRIu64 "]",
+					opt->vector_tmo_nsec,
+					limits.max_timeout_ns,
+					limits.min_timeout_ns);
+				return -EINVAL;
+			}
+
+			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+				queue_conf.vector_sz = opt->vector_size;
+				queue_conf.vector_timeout_ns =
+					opt->vector_tmo_nsec;
+				queue_conf.rx_queue_flags |=
+					RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+				queue_conf.vector_mp = vector_pool;
+			} else {
+				evt_err("Rx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
 		queue_conf.ev.queue_id = prod * stride;
 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
 				&prod_conf);
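The vector-pool sizing above provisions roughly twice as many event-vector containers as the mbuf pool can fill (nb_elem = (pool_sz / vector_size) << 1), clamped to at least one element so the pool is never created empty. A minimal sketch of that calculation around rte_event_vector_pool_create(), with illustrative parameter names:

#include <rte_eventdev.h>

static struct rte_mempool *
make_vector_pool(unsigned int pool_sz, uint16_t vector_size, int socket_id)
{
	/* twice the number of full vectors the mbuf pool could supply */
	unsigned int nb_elem = (pool_sz / vector_size) << 1;

	nb_elem = nb_elem ? nb_elem : 1; /* never create a zero-sized pool */
	return rte_event_vector_pool_create("vector_pool", nb_elem,
					    0 /* cache */, vector_size,
					    socket_id);
}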
@@ -240,32 +422,84 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 		}
 
 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
-			uint32_t service_id;
+			uint32_t service_id = -1U;
 
 			rte_event_eth_rx_adapter_service_id_get(prod,
 					&service_id);
 			ret = evt_service_setup(service_id);
 			if (ret) {
 				evt_err("Failed to setup service core"
-						" for Rx adapter\n");
+						" for Rx adapter");
 				return ret;
 			}
 		}
 
-		ret = rte_eth_dev_start(prod);
+		evt_info("Port[%d] using Rx adapter[%d] configured", prod,
+				prod);
+	}
+
+	return ret;
+}
+
+int
+pipeline_event_tx_adapter_setup(struct evt_options *opt,
+		struct rte_event_port_conf port_conf)
+{
+	int ret = 0;
+	uint16_t consm;
+
+	RTE_ETH_FOREACH_DEV(consm) {
+		uint32_t cap;
+
+		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
+				consm, &cap);
+		if (ret) {
+			evt_err("failed to get event tx adapter[%d] caps",
+					consm);
+			return ret;
+		}
+
+		if (opt->ena_vector) {
+			if (!(cap &
+			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
+				evt_err("Tx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
+
+		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
+				&port_conf);
 		if (ret) {
-			evt_err("Ethernet dev [%d] failed to start."
-					" Using synthetic producer", prod);
+			evt_err("failed to create tx adapter[%d]", consm);
 			return ret;
 		}
 
-		ret = rte_event_eth_rx_adapter_start(prod);
+		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
 		if (ret) {
-			evt_err("Rx adapter[%d] start failed", prod);
+			evt_err("failed to add tx queues to adapter[%d]",
+					consm);
 			return ret;
 		}
-		printf("%s: Port[%d] using Rx adapter[%d] started\n", __func__,
-				prod, prod);
+
+		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+			uint32_t service_id = -1U;
+
+			ret = rte_event_eth_tx_adapter_service_id_get(consm,
+					&service_id);
+			if (ret != -ESRCH && ret != 0) {
+				evt_err("Failed to get Tx adptr service ID");
+				return ret;
+			}
+			ret = evt_service_setup(service_id);
+			if (ret) {
+				evt_err("Failed to setup service core"
+						" for Tx adapter");
+				return ret;
+			}
+		}
+
+		evt_info("Port[%d] using Tx adapter[%d] Configured", consm,
+				consm);
 	}
 
 	return ret;
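The Tx adapter hunk above creates one adapter per ethdev and adds all of the port's Tx queues in one call, since a queue id of -1 means "all queues" to rte_event_eth_tx_adapter_queue_add(). A condensed sketch of that bring-up, assuming dev_id, port, and the event port config are prepared elsewhere:

#include <rte_event_eth_tx_adapter.h>

static int
tx_adapter_bring_up(uint8_t dev_id, uint16_t port,
		    struct rte_event_port_conf *conf)
{
	uint8_t id = (uint8_t)port; /* one adapter per ethdev, as above */
	int ret;

	ret = rte_event_eth_tx_adapter_create(id, dev_id, conf);
	if (ret)
		return ret;
	/* -1 adds every Tx queue configured on the port */
	return rte_event_eth_tx_adapter_queue_add(id, port, -1);
}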
@@ -274,14 +508,14 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 void
 pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
 {
-	int i;
+	uint16_t i;
 
 	RTE_SET_USED(test);
 	RTE_SET_USED(opt);
 
-	for (i = 0; i < rte_eth_dev_count(); i++) {
+	RTE_ETH_FOREACH_DEV(i) {
 		rte_event_eth_rx_adapter_stop(i);
+		rte_event_eth_tx_adapter_stop(i);
 		rte_eth_dev_stop(i);
-		rte_eth_dev_close(i);
 	}
 }
 
@@ -298,17 +532,64 @@ int
 pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
 	struct test_pipeline *t = evt_test_priv(test);
+	int i, ret;
+
+	if (!opt->mbuf_sz)
+		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;
+
+	if (!opt->max_pkt_sz)
+		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;
+
+	RTE_ETH_FOREACH_DEV(i) {
+		struct rte_eth_dev_info dev_info;
+		uint16_t data_size = 0;
+
+		memset(&dev_info, 0, sizeof(dev_info));
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+		    dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+			data_size = opt->max_pkt_sz /
+				dev_info.rx_desc_lim.nb_mtu_seg_max;
+			data_size += RTE_PKTMBUF_HEADROOM;
+
+			if (data_size > opt->mbuf_sz)
+				opt->mbuf_sz = data_size;
+		}
+		if (opt->per_port_pool) {
+			char name[RTE_MEMPOOL_NAMESIZE];
+
+			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
+				 test->name, i);
+			t->pool[i] = rte_pktmbuf_pool_create(
+				name,	      /* mempool name */
+				opt->pool_sz, /* number of elements*/
+				0,	      /* cache size*/
+				0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+			if (t->pool[i] == NULL) {
+				evt_err("failed to create mempool %s", name);
+				return -ENOMEM;
+			}
+		}
+	}
 
-	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+	if (!opt->per_port_pool) {
+		t->pool[0] = rte_pktmbuf_pool_create(
+			test->name,   /* mempool name */
 			opt->pool_sz, /* number of elements*/
-			512, /* cache size*/
-			0,
-			RTE_MBUF_DEFAULT_BUF_SIZE,
-			opt->socket_id); /* flags */
-
-	if (t->pool == NULL) {
-		evt_err("failed to create mempool");
-		return -ENOMEM;
+			0,	      /* cache size*/
+			0, opt->mbuf_sz, opt->socket_id); /* flags */
+
+		if (t->pool[0] == NULL) {
+			evt_err("failed to create mempool");
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
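pipeline_mempool_setup() above grows the mbuf data room whenever a PMD limits how many Rx segments an MTU-sized packet may span: each segment must then hold max_pkt_sz / nb_mtu_seg_max bytes plus headroom. A small sketch of that sizing rule; for example, a 9000 B packet over at most 5 segments needs 9000/5 + 128 = 1928 B of data room:

#include <rte_mbuf.h>

static inline uint16_t
min_mbuf_data_room(uint16_t max_pkt_sz, uint16_t nb_mtu_seg_max,
		   uint16_t cur_mbuf_sz)
{
	/* each segment carries its share of the packet plus headroom */
	uint16_t data_size = max_pkt_sz / nb_mtu_seg_max +
			     RTE_PKTMBUF_HEADROOM;

	return data_size > cur_mbuf_sz ? data_size : cur_mbuf_sz;
}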
@@ -317,10 +598,16 @@ pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 void
 pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
 {
-	RTE_SET_USED(opt);
 	struct test_pipeline *t = evt_test_priv(test);
+	int i;
 
-	rte_mempool_free(t->pool);
+	RTE_SET_USED(opt);
+	if (opt->per_port_pool) {
+		RTE_ETH_FOREACH_DEV(i)
+			rte_mempool_free(t->pool[i]);
+	} else {
+		rte_mempool_free(t->pool[0]);
+	}
 }
 
 int