X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=examples%2Feventdev_pipeline%2Fpipeline_worker_tx.c;h=ca6cd200caad316a9b24c85c76fa61aa1d49ddc9;hb=c94c9f3152a37fb31553e410ab7fd06f23802c71;hp=85eb075fc8714c023868711ccab4540861e62d6b;hpb=085edac2ca38105b629fed34fb82ad280eea129b;p=dpdk.git

diff --git a/examples/eventdev_pipeline/pipeline_worker_tx.c b/examples/eventdev_pipeline/pipeline_worker_tx.c
index 85eb075fc8..ca6cd200ca 100644
--- a/examples/eventdev_pipeline/pipeline_worker_tx.c
+++ b/examples/eventdev_pipeline/pipeline_worker_tx.c
@@ -40,7 +40,7 @@ worker_tx_pkt(const uint8_t dev, const uint8_t port, struct rte_event *ev)
 {
 	exchange_mac(ev->mbuf);
 	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
-	while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))
+	while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1, 0))
 		rte_pause();
 }
 
@@ -436,6 +436,7 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
 	struct rte_event_dev_config config = {
 			.nb_event_queues = nb_queues,
 			.nb_event_ports = nb_ports,
+			.nb_single_link_event_port_queues = 0,
 			.nb_events_limit = 4096,
 			.nb_event_queue_flows = 1024,
 			.nb_event_port_dequeue_depth = 128,
@@ -465,6 +466,8 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
 	ret = rte_event_dev_info_get(dev_id, &dev_info);
 	printf("\tEventdev %d: %s\n", dev_id, dev_info.driver_name);
 
+	if (dev_info.max_num_events < config.nb_events_limit)
+		config.nb_events_limit = dev_info.max_num_events;
 	if (dev_info.max_event_port_dequeue_depth <
 			config.nb_event_port_dequeue_depth)
 		config.nb_event_port_dequeue_depth =
@@ -528,6 +531,8 @@ setup_eventdev_worker_tx_enq(struct worker_data *worker_data)
 	}
 	printf("\n");
 
+	if (wkr_p_conf.new_event_threshold > config.nb_events_limit)
+		wkr_p_conf.new_event_threshold = config.nb_events_limit;
 	if (wkr_p_conf.dequeue_depth > config.nb_event_port_dequeue_depth)
 		wkr_p_conf.dequeue_depth = config.nb_event_port_dequeue_depth;
 	if (wkr_p_conf.enqueue_depth > config.nb_event_port_enqueue_depth)
@@ -599,6 +604,133 @@ service_rx_adapter(void *arg)
 	return 0;
 }
 
+/*
+ * Initializes a given port using global settings and with the RX buffers
+ * coming from the mbuf_pool passed as a parameter.
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+	struct rte_eth_rxconf rx_conf;
+	static const struct rte_eth_conf port_conf_default = {
+		.rxmode = {
+			.mq_mode = ETH_MQ_RX_RSS,
+			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
+		},
+		.rx_adv_conf = {
+			.rss_conf = {
+				.rss_hf = ETH_RSS_IP |
+					  ETH_RSS_TCP |
+					  ETH_RSS_UDP,
+			}
+		}
+	};
+	const uint16_t rx_rings = 1, tx_rings = 1;
+	const uint16_t rx_ring_size = 512, tx_ring_size = 512;
+	struct rte_eth_conf port_conf = port_conf_default;
+	int retval;
+	uint16_t q;
+	struct rte_eth_dev_info dev_info;
+	struct rte_eth_txconf txconf;
+
+	if (!rte_eth_dev_is_valid_port(port))
+		return -1;
+
+	retval = rte_eth_dev_info_get(port, &dev_info);
+	if (retval != 0) {
+		printf("Error during getting device (port %u) info: %s\n",
+				port, strerror(-retval));
+		return retval;
+	}
+
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+		port_conf.txmode.offloads |=
+			DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+	rx_conf = dev_info.default_rxconf;
+	rx_conf.offloads = port_conf.rxmode.offloads;
+
+	port_conf.rx_adv_conf.rss_conf.rss_hf &=
+		dev_info.flow_type_rss_offloads;
+	if (port_conf.rx_adv_conf.rss_conf.rss_hf !=
+			port_conf_default.rx_adv_conf.rss_conf.rss_hf) {
+		printf("Port %u modified RSS hash function based on hardware support,"
+			"requested:%#"PRIx64" configured:%#"PRIx64"\n",
+			port,
+			port_conf_default.rx_adv_conf.rss_conf.rss_hf,
+			port_conf.rx_adv_conf.rss_conf.rss_hf);
+	}
+
+	/* Configure the Ethernet device. */
+	retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+	if (retval != 0)
+		return retval;
+
+	/* Allocate and set up 1 RX queue per Ethernet port. */
+	for (q = 0; q < rx_rings; q++) {
+		retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
+				rte_eth_dev_socket_id(port), &rx_conf,
+				mbuf_pool);
+		if (retval < 0)
+			return retval;
+	}
+
+	txconf = dev_info.default_txconf;
+	txconf.offloads = port_conf_default.txmode.offloads;
+	/* Allocate and set up 1 TX queue per Ethernet port. */
+	for (q = 0; q < tx_rings; q++) {
+		retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
+				rte_eth_dev_socket_id(port), &txconf);
+		if (retval < 0)
+			return retval;
+	}
+
+	/* Display the port MAC address. */
+	struct rte_ether_addr addr;
+	retval = rte_eth_macaddr_get(port, &addr);
+	if (retval != 0) {
+		printf("Failed to get MAC address (port %u): %s\n",
+				port, rte_strerror(-retval));
+		return retval;
+	}
+
+	printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+			" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+			(unsigned int)port,
+			addr.addr_bytes[0], addr.addr_bytes[1],
+			addr.addr_bytes[2], addr.addr_bytes[3],
+			addr.addr_bytes[4], addr.addr_bytes[5]);
+
+	/* Enable RX in promiscuous mode for the Ethernet device. */
+	retval = rte_eth_promiscuous_enable(port);
+	if (retval != 0)
+		return retval;
+
+	return 0;
+}
+
+static int
+init_ports(uint16_t num_ports)
+{
+	uint16_t portid;
+
+	if (!cdata.num_mbuf)
+		cdata.num_mbuf = 16384 * num_ports;
+
+	struct rte_mempool *mp = rte_pktmbuf_pool_create("packet_pool",
+			/* mbufs */ cdata.num_mbuf,
+			/* cache_size */ 512,
+			/* priv_size*/ 0,
+			/* data_room_size */ RTE_MBUF_DEFAULT_BUF_SIZE,
+			rte_socket_id());
+
+	RTE_ETH_FOREACH_DEV(portid)
+		if (port_init(portid, mp) != 0)
+			rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
+					portid);
+
+	return 0;
+}
+
 static void
 init_adapters(uint16_t nb_ports)
 {
@@ -617,6 +749,9 @@ init_adapters(uint16_t nb_ports)
 		.new_event_threshold = 4096,
 	};
 
+	init_ports(nb_ports);
+	if (adptr_p_conf.new_event_threshold > dev_info.max_num_events)
+		adptr_p_conf.new_event_threshold = dev_info.max_num_events;
 	if (adptr_p_conf.dequeue_depth > dev_info.max_event_port_dequeue_depth)
 		adptr_p_conf.dequeue_depth =
 			dev_info.max_event_port_dequeue_depth;
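
The changes above fall into three groups: the Tx adapter enqueue call gains an additional flags argument (0 here, the default behaviour); the eventdev and port configuration defaults are clamped to the limits reported by rte_event_dev_info_get() (nb_events_limit against max_num_events, and each port's new_event_threshold against the configured nb_events_limit); and local port_init()/init_ports() helpers set up the Ethernet ports and the mbuf pool. A minimal sketch of the clamping pattern, pulled out into a standalone helper for clarity, follows; the helper name clamp_eventdev_config() is illustrative and not part of the patch, and it relies only on the rte_eventdev API used above.

#include <rte_eventdev.h>

/*
 * Illustrative helper (not part of the patch): cap an application-chosen
 * eventdev configuration and a port configuration to the capabilities the
 * PMD reports, mirroring the checks added in setup_eventdev_worker_tx_enq()
 * and init_adapters() above.
 */
static int
clamp_eventdev_config(uint8_t dev_id, struct rte_event_dev_config *cfg,
		struct rte_event_port_conf *pconf)
{
	struct rte_event_dev_info info;
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	/* Device-wide limit on in-flight events. */
	if (info.max_num_events < cfg->nb_events_limit)
		cfg->nb_events_limit = info.max_num_events;

	/* Per-port burst depths cannot exceed the device maxima. */
	if (cfg->nb_event_port_dequeue_depth >
			info.max_event_port_dequeue_depth)
		cfg->nb_event_port_dequeue_depth =
			info.max_event_port_dequeue_depth;
	if (cfg->nb_event_port_enqueue_depth >
			info.max_event_port_enqueue_depth)
		cfg->nb_event_port_enqueue_depth =
			info.max_event_port_enqueue_depth;

	/* Keep the port configuration consistent with the device config. */
	if (pconf->new_event_threshold > cfg->nb_events_limit)
		pconf->new_event_threshold = cfg->nb_events_limit;
	if (pconf->dequeue_depth > cfg->nb_event_port_dequeue_depth)
		pconf->dequeue_depth = cfg->nb_event_port_dequeue_depth;
	if (pconf->enqueue_depth > cfg->nb_event_port_enqueue_depth)
		pconf->enqueue_depth = cfg->nb_event_port_enqueue_depth;

	return 0;
}

Clamping before rte_event_dev_configure() lets the same application defaults work across PMDs whose advertised limits differ.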