diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index c20815c253..c66656cd39 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -13,12 +13,12 @@ pipeline_test_result(struct evt_test *test, struct evt_options *opt)
 	uint64_t total = 0;
 	struct test_pipeline *t = evt_test_priv(test);
 
-	printf("Packet distribution across worker cores :\n");
+	evt_info("Packet distribution across worker cores :");
 	for (i = 0; i < t->nb_workers; i++)
 		total += t->worker[i].processed_pkts;
 	for (i = 0; i < t->nb_workers; i++)
-		printf("Worker %d packets: "CLGRN"%"PRIx64" "CLNRM"percentage:"
-				CLGRN" %3.2f\n"CLNRM, i,
+		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
+				CLGRN" %3.2f"CLNRM, i,
 				t->worker[i].processed_pkts,
 				(((double)t->worker[i].processed_pkts)/total)
 				* 100);
@@ -36,18 +36,93 @@ pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
 	evt_dump_queue_priority(opt);
 	evt_dump_sched_type_list(opt);
 	evt_dump_producer_type(opt);
+	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
+	evt_dump("event_vector", "%d", opt->ena_vector);
+	if (opt->ena_vector) {
+		evt_dump("vector_size", "%d", opt->vector_size);
+		evt_dump("vector_tmo_ns", "%" PRIu64 "", opt->vector_tmo_nsec);
+	}
+}
+
+static inline uint64_t
+processed_pkts(struct test_pipeline *t)
+{
+	uint8_t i;
+	uint64_t total = 0;
+
+	for (i = 0; i < t->nb_workers; i++)
+		total += t->worker[i].processed_pkts;
+
+	return total;
+}
+
+int
+pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
+		int (*worker)(void *))
+{
+	int ret, lcore_id;
+	struct test_pipeline *t = evt_test_priv(test);
+
+	int port_idx = 0;
+	/* launch workers */
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
+		if (!(opt->wlcores[lcore_id]))
+			continue;
+
+		ret = rte_eal_remote_launch(worker,
+				&t->worker[port_idx], lcore_id);
+		if (ret) {
+			evt_err("failed to launch worker %d", lcore_id);
+			return ret;
+		}
+		port_idx++;
+	}
+
+	uint64_t perf_cycles = rte_get_timer_cycles();
+	const uint64_t perf_sample = rte_get_timer_hz();
+
+	static float total_mpps;
+	static uint64_t samples;
+
+	uint64_t prev_pkts = 0;
+
+	while (t->done == false) {
+		const uint64_t new_cycles = rte_get_timer_cycles();
+
+		if ((new_cycles - perf_cycles) > perf_sample) {
+			const uint64_t curr_pkts = processed_pkts(t);
+
+			float mpps = (float)(curr_pkts - prev_pkts)/1000000;
+
+			prev_pkts = curr_pkts;
+			perf_cycles = new_cycles;
+			total_mpps += mpps;
+			++samples;
+			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
+					mpps, total_mpps/samples);
+			fflush(stdout);
+		}
+	}
+	printf("\n");
+	return 0;
 }
 
 int
 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
 	unsigned int lcores;
-	/*
-	 * N worker + 1 master
-	 */
+
+	/* N worker + main */
 	lcores = 2;
 
-	if (!rte_eth_dev_count()) {
+	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		evt_err("Invalid producer type '%s' valid producer '%s'",
+			evt_prod_id_to_name(opt->prod_type),
+			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
+		return -1;
+	}
+
+	if (!rte_eth_dev_count_avail()) {
 		evt_err("test needs minimum 1 ethernet dev");
 		return -1;
 	}
@@ -58,8 +133,8 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 	}
 
 	/* Validate worker lcores */
-	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-		evt_err("worker lcores overlaps with master lcore");
+	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+		evt_err("worker lcores overlap with main lcore");
 		return -1;
 	}
 	if (evt_has_disabled_lcore(opt->wlcores)) {
@@ -94,80 +169,421 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 int
 pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
-	int i;
+	uint16_t i, j;
+	int ret;
 	uint8_t nb_queues = 1;
-	uint8_t mt_state = 0;
 	struct test_pipeline *t = evt_test_priv(test);
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
-			.max_rx_pkt_len = ETHER_MAX_LEN,
-			.offloads = DEV_RX_OFFLOAD_CRC_STRIP,
-			.ignore_offload_bitfield = 1,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
 
-	RTE_SET_USED(opt);
-	if (!rte_eth_dev_count()) {
-		evt_err("No ethernet ports found.\n");
+	if (!rte_eth_dev_count_avail()) {
+		evt_err("No ethernet ports found.");
 		return -ENODEV;
 	}
 
-	for (i = 0; i < rte_eth_dev_count(); i++) {
+	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
+		evt_err("max_pkt_sz cannot be less than %d",
+			RTE_ETHER_MIN_LEN);
+		return -EINVAL;
+	}
+
+	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+
+	t->internal_port = 1;
+	RTE_ETH_FOREACH_DEV(i) {
 		struct rte_eth_dev_info dev_info;
+		struct rte_eth_conf local_port_conf = port_conf;
+		uint32_t caps = 0;
+
+		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event tx adapter[%d] caps", i);
+			return ret;
+		}
+
+		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
+			t->internal_port = 0;
+
+		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event rx adapter[%d] caps", i);
+			return ret;
+		}
+
+		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+			local_port_conf.rxmode.offloads |=
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		/* Enable mbuf fast free if PMD has the capability. */
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
 
-		memset(&dev_info, 0, sizeof(struct rte_eth_dev_info));
-		rte_eth_dev_info_get(i, &dev_info);
-		mt_state = !(dev_info.tx_offload_capa &
-				DEV_TX_OFFLOAD_MT_LOCKFREE);
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
 
-		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
-					&port_conf)
-				< 0) {
-			evt_err("Failed to configure eth port [%d]\n", i);
+		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
+			dev_info.flow_type_rss_offloads;
+		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
+		    port_conf.rx_adv_conf.rss_conf.rss_hf) {
+			evt_info("Port %u modified RSS hash function based on hardware support, "
+				 "requested:%#"PRIx64" configured:%#"PRIx64"",
+				 i,
+				 port_conf.rx_adv_conf.rss_conf.rss_hf,
+				 local_port_conf.rx_adv_conf.rss_conf.rss_hf);
+		}
+
+		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
+					  &local_port_conf) < 0) {
+			evt_err("Failed to configure eth port [%d]", i);
 			return -EINVAL;
 		}
 
-		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
-				rte_socket_id(), &rx_conf, t->pool) < 0) {
-			evt_err("Failed to setup eth port [%d] rx_queue: %d.\n",
-				i, 0);
-			return -EINVAL;
+		for (j = 0; j < opt->eth_queues; j++) {
+			if (rte_eth_rx_queue_setup(
+				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
+				    opt->per_port_pool ? t->pool[i] :
+							 t->pool[0]) < 0) {
+				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+					i, j);
+				return -EINVAL;
+			}
 		}
 
 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
 					rte_socket_id(), NULL) < 0) {
-			evt_err("Failed to setup eth port [%d] tx_queue: %d.\n",
+			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
 				i, 0);
 			return -EINVAL;
 		}
 
-		t->mt_unsafe |= mt_state;
-		rte_eth_promiscuous_enable(i);
+		ret = rte_eth_promiscuous_enable(i);
+		if (ret != 0) {
+			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+				i, rte_strerror(-ret));
+			return ret;
+		}
 	}
 
 	return 0;
 }
 
+int
+pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
+		uint8_t *queue_arr, uint8_t nb_queues,
+		const struct rte_event_port_conf p_conf)
+{
+	int ret;
+	uint8_t port;
+	struct test_pipeline *t = evt_test_priv(test);
+
+	/* setup one port per worker, linking to all queues */
+	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
+		struct worker_data *w = &t->worker[port];
+
+		w->dev_id = opt->dev_id;
+		w->port_id = port;
+		w->t = t;
+		w->processed_pkts = 0;
+
+		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
+		if (ret) {
+			evt_err("failed to setup port %d", port);
+			return ret;
+		}
+
+		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
+					nb_queues) != nb_queues)
+			goto link_fail;
+	}
+
+	return 0;
+
+link_fail:
+	evt_err("failed to link queues to port %d", port);
+	return -EINVAL;
+}
+
+int
+pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
+		struct rte_event_port_conf prod_conf)
+{
+	int ret = 0;
+	uint16_t prod;
+	struct rte_mempool *vector_pool = NULL;
+	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
+
+	memset(&queue_conf, 0,
+			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
+	queue_conf.ev.sched_type = opt->sched_type_list[0];
+	if (opt->ena_vector) {
+		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;
+
+		nb_elem = nb_elem ? nb_elem : 1;
+		vector_pool = rte_event_vector_pool_create(
+			"vector_pool", nb_elem, 0, opt->vector_size,
+			opt->socket_id);
+		if (vector_pool == NULL) {
+			evt_err("failed to create event vector pool");
+			return -ENOMEM;
+		}
+	}
+	RTE_ETH_FOREACH_DEV(prod) {
+		struct rte_event_eth_rx_adapter_vector_limits limits;
+		uint32_t cap;
+
+		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
+				prod, &cap);
+		if (ret) {
+			evt_err("failed to get event rx adapter[%d]"
+				" capabilities",
+				opt->dev_id);
+			return ret;
+		}
+
+		if (opt->ena_vector) {
+			memset(&limits, 0, sizeof(limits));
+			ret = rte_event_eth_rx_adapter_vector_limits_get(
+				opt->dev_id, prod, &limits);
+			if (ret) {
+				evt_err("failed to get vector limits");
+				return ret;
+			}
+
+			if (opt->vector_size < limits.min_sz ||
+			    opt->vector_size > limits.max_sz) {
+				evt_err("Vector size [%d] not within limits max[%d] min[%d]",
+					opt->vector_size, limits.max_sz,
+					limits.min_sz);
+				return -EINVAL;
+			}
+
+			if (limits.log2_sz &&
+			    !rte_is_power_of_2(opt->vector_size)) {
+				evt_err("Vector size [%d] not power of 2",
+					opt->vector_size);
+				return -EINVAL;
+			}
+
+			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
+			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
+				evt_err("Vector timeout [%" PRIu64
+					"] not within limits max[%" PRIu64
+					"] min[%" PRIu64 "]",
+					opt->vector_tmo_nsec,
+					limits.max_timeout_ns,
+					limits.min_timeout_ns);
+				return -EINVAL;
+			}
+
+			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+				queue_conf.vector_sz = opt->vector_size;
+				queue_conf.vector_timeout_ns =
+					opt->vector_tmo_nsec;
+				queue_conf.rx_queue_flags |=
+					RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+				queue_conf.vector_mp = vector_pool;
+			} else {
+				evt_err("Rx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
+		queue_conf.ev.queue_id = prod * stride;
+		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
+				&prod_conf);
+		if (ret) {
+			evt_err("failed to create rx adapter[%d]", prod);
+			return ret;
+		}
+		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
+				&queue_conf);
+		if (ret) {
+			evt_err("failed to add rx queues to adapter[%d]", prod);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
+			uint32_t service_id = -1U;
+
+			rte_event_eth_rx_adapter_service_id_get(prod,
+					&service_id);
+			ret = evt_service_setup(service_id);
+			if (ret) {
+				evt_err("Failed to setup service core"
+					" for Rx adapter");
+				return ret;
+			}
+		}
+
+		evt_info("Port[%d] using Rx adapter[%d] configured", prod,
+				prod);
+	}
+
+	return ret;
+}
+
+int
+pipeline_event_tx_adapter_setup(struct evt_options *opt,
+		struct rte_event_port_conf port_conf)
+{
+	int ret = 0;
+	uint16_t consm;
+
+	RTE_ETH_FOREACH_DEV(consm) {
+		uint32_t cap;
+
+		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
+				consm, &cap);
+		if (ret) {
+			evt_err("failed to get event tx adapter[%d] caps",
+				consm);
+			return ret;
+		}
+
+		if (opt->ena_vector) {
+			if (!(cap &
+			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
+				evt_err("Tx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
+
+		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
+				&port_conf);
+		if (ret) {
+			evt_err("failed to create tx adapter[%d]", consm);
+			return ret;
+		}
+
+		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
+		if (ret) {
+			evt_err("failed to add tx queues to adapter[%d]",
+				consm);
+			return ret;
+		}
+
+		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
+			uint32_t service_id = -1U;
+
+			ret = rte_event_eth_tx_adapter_service_id_get(consm,
+					&service_id);
+			if (ret != -ESRCH && ret != 0) {
+				evt_err("Failed to get Tx adapter service ID");
+				return ret;
+			}
+			ret = evt_service_setup(service_id);
+			if (ret) {
+				evt_err("Failed to setup service core"
+					" for Tx adapter");
+				return ret;
+			}
+		}
+
+		evt_info("Port[%d] using Tx adapter[%d] configured", consm,
+				consm);
+	}
+
+	return ret;
+}
+
+static void
+pipeline_vector_array_free(struct rte_event events[], uint16_t num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
+				events[i].vec->nb_elem);
+		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
+				events[i].vec);
+	}
+}
+
+static void
+pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
+		void *args __rte_unused)
+{
+	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
+		pipeline_vector_array_free(&ev, 1);
+	else
+		rte_pktmbuf_free(ev.mbuf);
+}
+
 void
-pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
+		uint16_t enq, uint16_t deq)
 {
 	int i;
+
+	if (!(deq - enq))
+		return;
+
+	if (deq) {
+		for (i = enq; i < deq; i++) {
+			if (ev[i].op == RTE_EVENT_OP_RELEASE)
+				continue;
+			if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
+				pipeline_vector_array_free(&ev[i], 1);
+			else
+				rte_pktmbuf_free(ev[i].mbuf);
+		}
+
+		for (i = 0; i < deq; i++)
+			ev[i].op = RTE_EVENT_OP_RELEASE;
+
+		rte_event_enqueue_burst(dev, port, ev, deq);
+	}
+
+	rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
+}
+
+void
+pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
+{
+	uint16_t i, j;
+	RTE_SET_USED(test);
+
+	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		RTE_ETH_FOREACH_DEV(i) {
+			rte_event_eth_rx_adapter_stop(i);
+			rte_event_eth_rx_adapter_queue_del(i, i, -1);
+			for (j = 0; j < opt->eth_queues; j++)
+				rte_eth_dev_rx_queue_stop(i, j);
+		}
+	}
+}
+
+void
+pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
+{
+	uint16_t i;
 	RTE_SET_USED(test);
 	RTE_SET_USED(opt);
 
-	for (i = 0; i < rte_eth_dev_count(); i++) {
-		rte_event_eth_rx_adapter_stop(i);
+	RTE_ETH_FOREACH_DEV(i) {
+		rte_event_eth_tx_adapter_stop(i);
+		rte_event_eth_tx_adapter_queue_del(i, i, -1);
+		rte_eth_dev_tx_queue_stop(i, 0);
 		rte_eth_dev_stop(i);
-		rte_eth_dev_close(i);
 	}
 }
 
@@ -184,17 +600,64 @@ int
 pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
 	struct test_pipeline *t = evt_test_priv(test);
+	int i, ret;
+
+	if (!opt->mbuf_sz)
+		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;
 
-	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+	if (!opt->max_pkt_sz)
+		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;
+
+	RTE_ETH_FOREACH_DEV(i) {
+		struct rte_eth_dev_info dev_info;
+		uint16_t data_size = 0;
+
+		memset(&dev_info, 0, sizeof(dev_info));
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+		    dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+			data_size = opt->max_pkt_sz /
+				dev_info.rx_desc_lim.nb_mtu_seg_max;
+			data_size += RTE_PKTMBUF_HEADROOM;
+
+			if (data_size > opt->mbuf_sz)
+				opt->mbuf_sz = data_size;
+		}
+		if (opt->per_port_pool) {
+			char name[RTE_MEMPOOL_NAMESIZE];
+
+			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
+				 test->name, i);
+			t->pool[i] = rte_pktmbuf_pool_create(
+				name,		/* mempool name */
+				opt->pool_sz,	/* number of elements */
+				0,		/* cache size */
+				0, opt->mbuf_sz, opt->socket_id);
+
+			if (t->pool[i] == NULL) {
+				evt_err("failed to create mempool %s", name);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	if (!opt->per_port_pool) {
+		t->pool[0] = rte_pktmbuf_pool_create(
+			test->name,	/* mempool name */
 			opt->pool_sz,	/* number of elements */
-			512, /* cache size*/
-			0,
-			RTE_MBUF_DEFAULT_BUF_SIZE,
-			opt->socket_id); /* flags */
+			0,		/* cache size */
+			0, opt->mbuf_sz, opt->socket_id);
 
-	if (t->pool == NULL) {
-		evt_err("failed to create mempool");
-		return -ENOMEM;
+		if (t->pool[0] == NULL) {
+			evt_err("failed to create mempool");
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
@@ -203,10 +666,16 @@ pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 void
 pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
 {
-	RTE_SET_USED(opt);
 	struct test_pipeline *t = evt_test_priv(test);
+	int i;
 
-	rte_mempool_free(t->pool);
+	RTE_SET_USED(opt);
+	if (opt->per_port_pool) {
+		RTE_ETH_FOREACH_DEV(i)
+			rte_mempool_free(t->pool[i]);
+	} else {
+		rte_mempool_free(t->pool[0]);
+	}
 }
 
 int