X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=app%2Ftest-eventdev%2Ftest_pipeline_common.c;h=ddaa9f3fdb5a6f003b41a6007af91cf7f1118f35;hb=08dfff78f200bb9f077b0006ef05ab59dbc40ac8;hp=1e525643d33011566494c011da78d1c3f9da7dc1;hpb=8f7bc65cefb2fa8e79cf32bcc15945de801f9b48;p=dpdk.git

diff --git a/app/test-eventdev/test_pipeline_common.c b/app/test-eventdev/test_pipeline_common.c
index 1e525643d3..ddaa9f3fdb 100644
--- a/app/test-eventdev/test_pipeline_common.c
+++ b/app/test-eventdev/test_pipeline_common.c
@@ -36,6 +36,12 @@ pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
 	evt_dump_queue_priority(opt);
 	evt_dump_sched_type_list(opt);
 	evt_dump_producer_type(opt);
+	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
+	evt_dump("event_vector", "%d", opt->ena_vector);
+	if (opt->ena_vector) {
+		evt_dump("vector_size", "%d", opt->vector_size);
+		evt_dump("vector_tmo_ns", "%" PRIu64 "", opt->vector_tmo_nsec);
+	}
 }
 
 static inline uint64_t
@@ -44,7 +50,6 @@ processed_pkts(struct test_pipeline *t)
 	uint8_t i;
 	uint64_t total = 0;
 
-	rte_smp_rmb();
 	for (i = 0; i < t->nb_workers; i++)
 		total += t->worker[i].processed_pkts;
 
@@ -60,7 +65,7 @@ pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
 	int port_idx = 0;
 
 	/* launch workers */
-	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
+	RTE_LCORE_FOREACH_WORKER(lcore_id) {
 		if (!(opt->wlcores[lcore_id]))
 			continue;
 
@@ -106,11 +111,17 @@ int
 pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 {
 	unsigned int lcores;
-	/*
-	 * N worker + 1 master
-	 */
+
+	/* N worker + main */
 	lcores = 2;
 
+	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
+		evt_err("Invalid producer type '%s'; valid producer: '%s'",
+			evt_prod_id_to_name(opt->prod_type),
+			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
+		return -1;
+	}
+
 	if (!rte_eth_dev_count_avail()) {
 		evt_err("test needs minimum 1 ethernet dev");
 		return -1;
@@ -122,8 +133,8 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 	}
 
 	/* Validate worker lcores */
-	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
-		evt_err("worker lcores overlaps with master lcore");
+	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
+		evt_err("worker lcores overlap with main lcore");
 		return -1;
 	}
 	if (evt_has_disabled_lcore(opt->wlcores)) {
@@ -158,40 +169,74 @@ pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
 int
 pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 {
-	uint16_t i;
+	uint16_t i, j;
+	int ret;
 	uint8_t nb_queues = 1;
 	struct test_pipeline *t = evt_test_priv(test);
 	struct rte_eth_rxconf rx_conf;
 	struct rte_eth_conf port_conf = {
 		.rxmode = {
-			.mq_mode = ETH_MQ_RX_RSS,
-			.max_rx_pkt_len = ETHER_MAX_LEN,
+			.mq_mode = RTE_ETH_MQ_RX_RSS,
 		},
 		.rx_adv_conf = {
 			.rss_conf = {
 				.rss_key = NULL,
-				.rss_hf = ETH_RSS_IP,
+				.rss_hf = RTE_ETH_RSS_IP,
 			},
 		},
 	};
 
-	RTE_SET_USED(opt);
 	if (!rte_eth_dev_count_avail()) {
 		evt_err("No ethernet ports found.");
 		return -ENODEV;
 	}
 
+	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
+		evt_err("max_pkt_sz cannot be less than %d",
+			RTE_ETHER_MIN_LEN);
+		return -EINVAL;
+	}
+
+	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
+		RTE_ETHER_CRC_LEN;
+
 	t->internal_port = 1;
 	RTE_ETH_FOREACH_DEV(i) {
 		struct rte_eth_dev_info dev_info;
 		struct rte_eth_conf local_port_conf = port_conf;
 		uint32_t caps = 0;
 
-		rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event tx adapter[%d] caps", i);
+			return ret;
+		}
+
 		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
 			t->internal_port = 0;
 
-		rte_eth_dev_info_get(i, &dev_info);
+		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
+		if (ret != 0) {
+			evt_err("failed to get event rx adapter[%d] caps", i);
+			return ret;
+		}
+
+		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
+			local_port_conf.rxmode.offloads |=
+				RTE_ETH_RX_OFFLOAD_RSS_HASH;
+
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		/* Enable mbuf fast free if PMD has the capability. */
+		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
+			local_port_conf.txmode.offloads |=
+				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
+
 		rx_conf = dev_info.default_rxconf;
 		rx_conf.offloads = port_conf.rxmode.offloads;
 
@@ -206,19 +251,23 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 			local_port_conf.rx_adv_conf.rss_conf.rss_hf);
 		}
 
-		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
-				&local_port_conf)
-		    < 0) {
+		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
+					  &local_port_conf) < 0) {
 			evt_err("Failed to configure eth port [%d]", i);
 			return -EINVAL;
 		}
 
-		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
-				rte_socket_id(), &rx_conf, t->pool) < 0) {
-			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
+		for (j = 0; j < opt->eth_queues; j++) {
+			if (rte_eth_rx_queue_setup(
+				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
+				    opt->per_port_pool ? t->pool[i] :
+							 t->pool[0]) < 0) {
+				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
 					i, 0);
-			return -EINVAL;
+				return -EINVAL;
+			}
 		}
+
 		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
 					rte_socket_id(), NULL) < 0) {
 			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
@@ -226,7 +275,12 @@ pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
 			return -EINVAL;
 		}
 
-		rte_eth_promiscuous_enable(i);
+		ret = rte_eth_promiscuous_enable(i);
+		if (ret != 0) {
+			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
+				i, rte_strerror(-ret));
+			return ret;
+		}
 	}
 
 	return 0;
@@ -275,12 +329,26 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 {
 	int ret = 0;
 	uint16_t prod;
+	struct rte_mempool *vector_pool = NULL;
 	struct rte_event_eth_rx_adapter_queue_conf queue_conf;
 
 	memset(&queue_conf, 0,
 			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
 	queue_conf.ev.sched_type = opt->sched_type_list[0];
+	if (opt->ena_vector) {
+		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;
+
+		nb_elem = nb_elem ? nb_elem : 1;
+		vector_pool = rte_event_vector_pool_create(
+			"vector_pool", nb_elem, 0, opt->vector_size,
+			opt->socket_id);
+		if (vector_pool == NULL) {
+			evt_err("failed to create event vector pool");
+			return -ENOMEM;
+		}
+	}
 	RTE_ETH_FOREACH_DEV(prod) {
+		struct rte_event_eth_rx_adapter_vector_limits limits;
 		uint32_t cap;
 
 		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
@@ -291,6 +359,54 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 				opt->dev_id);
 			return ret;
 		}
+
+		if (opt->ena_vector) {
+			memset(&limits, 0, sizeof(limits));
+			ret = rte_event_eth_rx_adapter_vector_limits_get(
+				opt->dev_id, prod, &limits);
+			if (ret) {
+				evt_err("failed to get vector limits");
+				return ret;
+			}
+
+			if (opt->vector_size < limits.min_sz ||
+			    opt->vector_size > limits.max_sz) {
+				evt_err("Vector size [%d] not within limits max[%d] min[%d]",
+					opt->vector_size, limits.min_sz,
+					limits.max_sz);
+				return -EINVAL;
+			}
+
+			if (limits.log2_sz &&
+			    !rte_is_power_of_2(opt->vector_size)) {
+				evt_err("Vector size [%d] not power of 2",
+					opt->vector_size);
+				return -EINVAL;
+			}
+
+			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
+			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
+				evt_err("Vector timeout [%" PRIu64
+					"] not within limits max[%" PRIu64
+					"] min[%" PRIu64 "]",
+					opt->vector_tmo_nsec,
+					limits.max_timeout_ns,
+					limits.min_timeout_ns);
+				return -EINVAL;
+			}
+
+			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
+				queue_conf.vector_sz = opt->vector_size;
+				queue_conf.vector_timeout_ns =
+					opt->vector_tmo_nsec;
+				queue_conf.rx_queue_flags |=
+					RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
+				queue_conf.vector_mp = vector_pool;
+			} else {
+				evt_err("Rx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
 		queue_conf.ev.queue_id = prod * stride;
 		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
 				&prod_conf);
@@ -306,7 +422,7 @@ pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
 		}
 
 		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
-			uint32_t service_id;
+			uint32_t service_id = -1U;
 
 			rte_event_eth_rx_adapter_service_id_get(prod,
 					&service_id);
@@ -343,6 +459,14 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
 			return ret;
 		}
 
+		if (opt->ena_vector) {
+			if (!(cap &
+			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
+				evt_err("Tx adapter doesn't support event vector");
+				return -EINVAL;
+			}
+		}
+
 		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
 				&port_conf);
 		if (ret) {
@@ -358,14 +482,18 @@ pipeline_event_tx_adapter_setup(struct evt_options *opt,
 		}
 
 		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
-			uint32_t service_id;
+			uint32_t service_id = -1U;
 
-			rte_event_eth_tx_adapter_service_id_get(consm,
-					&service_id);
+			ret = rte_event_eth_tx_adapter_service_id_get(consm,
+					&service_id);
+			if (ret != -ESRCH && ret != 0) {
+				evt_err("Failed to get Tx adapter service ID");
+				return ret;
+			}
 			ret = evt_service_setup(service_id);
 			if (ret) {
 				evt_err("Failed to setup service core"
-						" for Tx adapter\n");
+						" for Tx adapter");
 				return ret;
 			}
 		}
@@ -404,17 +532,64 @@ int
 pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 {
 	struct test_pipeline *t = evt_test_priv(test);
+	int i, ret;
+
+	if (!opt->mbuf_sz)
+		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;
 
-	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
+	if (!opt->max_pkt_sz)
+		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;
+
+	RTE_ETH_FOREACH_DEV(i) {
+		struct rte_eth_dev_info dev_info;
+		uint16_t data_size = 0;
+
+		memset(&dev_info, 0, sizeof(dev_info));
+		ret = rte_eth_dev_info_get(i, &dev_info);
+		if (ret != 0) {
+			evt_err("Error during getting device (port %u) info: %s\n",
+				i, strerror(-ret));
+			return ret;
+		}
+
+		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
+		    dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
+			data_size = opt->max_pkt_sz /
+				dev_info.rx_desc_lim.nb_mtu_seg_max;
+			data_size += RTE_PKTMBUF_HEADROOM;
+
+			if (data_size > opt->mbuf_sz)
+				opt->mbuf_sz = data_size;
+		}
+		if (opt->per_port_pool) {
+			char name[RTE_MEMPOOL_NAMESIZE];
+
+			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
+				 test->name, i);
+			t->pool[i] = rte_pktmbuf_pool_create(
+				name, /* mempool name */
+				opt->pool_sz, /* number of elements */
+				0, /* cache size */
+				0, opt->mbuf_sz, opt->socket_id); /* priv size, data room size, socket id */
+
+			if (t->pool[i] == NULL) {
+				evt_err("failed to create mempool %s", name);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	if (!opt->per_port_pool) {
+		t->pool[0] = rte_pktmbuf_pool_create(
+			test->name, /* mempool name */
 			opt->pool_sz, /* number of elements*/
-			512, /* cache size*/
-			0,
-			RTE_MBUF_DEFAULT_BUF_SIZE,
-			opt->socket_id); /* flags */
-
-	if (t->pool == NULL) {
-		evt_err("failed to create mempool");
-		return -ENOMEM;
+			0, /* cache size */
+			0, opt->mbuf_sz, opt->socket_id); /* priv size, data room size, socket id */
+
+		if (t->pool[0] == NULL) {
+			evt_err("failed to create mempool");
+			return -ENOMEM;
+		}
 	}
 
 	return 0;
@@ -423,10 +598,16 @@ pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
 void
 pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
 {
-	RTE_SET_USED(opt);
 	struct test_pipeline *t = evt_test_priv(test);
+	int i;
 
-	rte_mempool_free(t->pool);
+	RTE_SET_USED(opt);
+	if (opt->per_port_pool) {
+		RTE_ETH_FOREACH_DEV(i)
+			rte_mempool_free(t->pool[i]);
+	} else {
+		rte_mempool_free(t->pool[0]);
+	}
 }
 
 int
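
Note on the vector pool sizing above: pipeline_event_rx_adapter_setup() sizes the event vector pool as (pool_sz / vector_size) << 1, i.e. twice the number of full vectors the mbuf pool can supply, clamped to at least one element so the pool is never created empty. A minimal standalone sketch of that arithmetic in plain C, with no DPDK dependency (the sample values are illustrative, not test-eventdev defaults):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the sizing in pipeline_event_rx_adapter_setup(): twice the
 * number of vectors the mbuf pool can fill, never zero. */
static unsigned int
vector_pool_elems(unsigned int pool_sz, uint16_t vector_size)
{
	unsigned int nb_elem = (pool_sz / vector_size) << 1;

	return nb_elem ? nb_elem : 1;
}

int
main(void)
{
	/* Example values only; the real ones come from --pool_sz and
	 * --vector_size on the test-eventdev command line. */
	printf("%u\n", vector_pool_elems(16384, 256)); /* 128 */
	printf("%u\n", vector_pool_elems(64, 256));    /* 1 (clamped) */
	return 0;
}

The doubling presumably leaves headroom for vectors still held inside the adapter while workers own earlier ones; the clamp covers the corner case where pool_sz is smaller than vector_size.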
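
The same function validates --vector_size and --vector_tmo_ns against the limits the Rx adapter reports before setting RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR. A standalone sketch of those three checks follows; the struct below is a stand-in that mirrors only the fields the patch reads from rte_event_eth_rx_adapter_vector_limits, not the real DPDK definition, and is_pow2() stands in for rte_is_power_of_2():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for struct rte_event_eth_rx_adapter_vector_limits. */
struct vector_limits {
	uint16_t min_sz;
	uint16_t max_sz;
	uint8_t log2_sz;         /* non-zero: size must be a power of two */
	uint64_t min_timeout_ns;
	uint64_t max_timeout_ns;
};

/* Plain-C equivalent of rte_is_power_of_2(). */
static bool
is_pow2(uint16_t v)
{
	return v != 0 && (v & (v - 1)) == 0;
}

/* Same three checks pipeline_event_rx_adapter_setup() performs before
 * enabling vectorization: size within [min_sz, max_sz], power-of-two
 * when the PMD demands it, timeout within the reported bounds. */
static bool
vector_opts_valid(const struct vector_limits *lim, uint16_t sz, uint64_t tmo)
{
	if (sz < lim->min_sz || sz > lim->max_sz)
		return false;
	if (lim->log2_sz && !is_pow2(sz))
		return false;
	if (tmo < lim->min_timeout_ns || tmo > lim->max_timeout_ns)
		return false;
	return true;
}

int
main(void)
{
	/* Illustrative limits; real values come from the Rx adapter. */
	struct vector_limits lim = { 4, 1024, 1, 1000, 10000000 };

	printf("%d\n", vector_opts_valid(&lim, 256, 5000)); /* 1: valid */
	printf("%d\n", vector_opts_valid(&lim, 96, 5000));  /* 0: not pow2 */
	return 0;
}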
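
Finally, pipeline_mempool_setup() grows the mbuf data room so that a packet of max_pkt_sz still fits when the PMD can only chain nb_mtu_seg_max segments per packet. A sketch of that sizing rule, assuming PKTMBUF_HEADROOM as a stand-in constant (128 is DPDK's usual RTE_PKTMBUF_HEADROOM) so the example builds without DPDK headers:

#include <stdint.h>
#include <stdio.h>

#define PKTMBUF_HEADROOM 128 /* assumed stand-in for RTE_PKTMBUF_HEADROOM */

/* Mirrors the data-room sizing in pipeline_mempool_setup(): when the
 * PMD reports a per-packet segment limit, each mbuf must hold at least
 * max_pkt_sz / nb_mtu_seg_max bytes of data plus headroom; the element
 * size is grown if the current mbuf_sz is too small. */
static uint16_t
required_mbuf_sz(uint16_t mbuf_sz, uint16_t max_pkt_sz,
		 uint16_t nb_mtu_seg_max)
{
	uint16_t data_size;

	/* UINT16_MAX or 0 means the PMD reported no usable limit. */
	if (nb_mtu_seg_max == 0 || nb_mtu_seg_max == UINT16_MAX)
		return mbuf_sz;

	data_size = max_pkt_sz / nb_mtu_seg_max;
	data_size += PKTMBUF_HEADROOM;

	return data_size > mbuf_sz ? data_size : mbuf_sz;
}

int
main(void)
{
	/* 9600-byte jumbo frames across at most 2 segments: each mbuf
	 * needs 4800 + 128 bytes, more than a 2176-byte default. */
	printf("%u\n", required_mbuf_sz(2176, 9600, 2)); /* 4928 */
	return 0;
}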