X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=examples%2Fpacket_ordering%2Fmain.c;h=49ae35b8ce67a49caea7ffde8512a160ba19fa6e;hb=06ff2ba9bdcb5d3da0d7eaaff3a242f140903513;hp=75e2f4613be6c4fc0b9f6fa6388853f83fccdd7b;hpb=850f3733f84031e95087d65aa112ecdce2d0e0b8;p=dpdk.git

diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 75e2f4613b..49ae35b8ce 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -39,6 +39,7 @@
 #include <rte_errno.h>
 #include <rte_ethdev.h>
 #include <rte_lcore.h>
+#include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_mempool.h>
 #include <rte_ring.h>
@@ -50,22 +51,10 @@
 #define MAX_PKTS_BURST 32
 #define REORDER_BUFFER_SIZE 8192
 #define MBUF_PER_POOL 65535
-#define MBUF_SIZE (1600 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
 #define MBUF_POOL_CACHE_SIZE 250
 
 #define RING_SIZE 16384
 
-/* uncommnet below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_REORDERAPP RTE_LOGTYPE_USER1
 
@@ -82,9 +71,9 @@ struct worker_thread_args {
 	struct rte_ring *ring_out;
 };
 
-struct output_buffer {
-	unsigned count;
-	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
+struct send_thread_args {
+	struct rte_ring *ring_in;
+	struct rte_reorder_buffer *buffer;
 };
 
 volatile struct app_stats {
@@ -227,7 +216,69 @@ parse_args(int argc, char **argv)
 	}
 
 	argv[optind-1] = prgname;
-	optind = 0; /* reset getopt lib */
+	optind = 1; /* reset getopt lib */
+	return 0;
+}
+
+/*
+ * Tx buffer error callback
+ */
+static void
+flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
+		void *userdata __rte_unused) {
+
+	/* free the mbufs which failed from transmit */
+	app_stats.tx.ro_tx_failed_pkts += count;
+	RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+	pktmbuf_free_bulk(unsent, count);
+
+}
+
+static inline int
+free_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[]) {
+	const uint8_t nb_ports = rte_eth_dev_count();
+	unsigned port_id;
+
+	/* free buffers for all ports */
+	for (port_id = 0; port_id < nb_ports; port_id++) {
+		/* skip ports that are not enabled */
+		if ((portmask & (1 << port_id)) == 0)
+			continue;
+
+		rte_free(tx_buffer[port_id]);
+	}
+	return 0;
+}
+
+static inline int
+configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
+{
+	const uint8_t nb_ports = rte_eth_dev_count();
+	unsigned port_id;
+	int ret;
+
+	/* initialize buffers for all ports */
+	for (port_id = 0; port_id < nb_ports; port_id++) {
+		/* skip ports that are not enabled */
+		if ((portmask & (1 << port_id)) == 0)
+			continue;
+
+		/* Initialize TX buffers */
+		tx_buffer[port_id] = rte_zmalloc_socket("tx_buffer",
+				RTE_ETH_TX_BUFFER_SIZE(MAX_PKTS_BURST), 0,
+				rte_eth_dev_socket_id(port_id));
+		if (tx_buffer[port_id] == NULL)
+			rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
+					(unsigned) port_id);
+
+		rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);
+
+		ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
+				flush_tx_error_callback, NULL);
+		if (ret < 0)
+			rte_exit(EXIT_FAILURE, "Cannot set error callback for "
+					"tx buffer on port %u\n", (unsigned) port_id);
+	}
 	return 0;
 }
 
@@ -359,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
 			nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
 				pkts, MAX_PKTS_BURST);
 			if (nb_rx_pkts == 0) {
-				LOG_DEBUG(REORDERAPP,
+				RTE_LOG_DP(DEBUG, REORDERAPP,
 					"%s():Received zero packets\n", __func__);
 				continue;
 			}
@@ -370,8 +421,8 @@
 				pkts[i++]->seqn = seqn++;
 
 			/* enqueue to rx_to_workers ring */
-			ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
-					nb_rx_pkts);
+			ret = rte_ring_enqueue_burst(ring_out,
+					(void *)pkts, nb_rx_pkts, NULL);
 			app_stats.rx.enqueue_pkts += ret;
 			if (unlikely(ret < nb_rx_pkts)) {
 				app_stats.rx.enqueue_failed_pkts +=
@@ -411,7 +462,7 @@ worker_thread(void *args_ptr)
 
 		/* dequeue the mbufs from rx_to_workers ring */
 		burst_size = rte_ring_dequeue_burst(ring_in,
-				(void *)burst_buffer, MAX_PKTS_BURST);
+				(void *)burst_buffer, MAX_PKTS_BURST, NULL);
 		if (unlikely(burst_size == 0))
 			continue;
 
@@ -422,7 +473,8 @@
 			burst_buffer[i++]->port ^= xor_val;
 
 		/* enqueue the modified mbufs to workers_to_tx ring */
-		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+		ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+				burst_size, NULL);
 		__sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
 		if (unlikely(ret < burst_size)) {
 			/* Return the mbufs to their respective pool, dropping packets */
@@ -434,46 +486,31 @@
 	return 0;
 }
 
-static inline void
-flush_one_port(struct output_buffer *outbuf, uint8_t outp)
-{
-	unsigned nb_tx = rte_eth_tx_burst(outp, 0, outbuf->mbufs,
-			outbuf->count);
-	app_stats.tx.ro_tx_pkts += nb_tx;
-
-	if (unlikely(nb_tx < outbuf->count)) {
-		/* free the mbufs which failed from transmit */
-		app_stats.tx.ro_tx_failed_pkts += (outbuf->count - nb_tx);
-		LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
-		pktmbuf_free_bulk(&outbuf->mbufs[nb_tx], outbuf->count - nb_tx);
-	}
-	outbuf->count = 0;
-}
-
 /**
  * Dequeue mbufs from the workers_to_tx ring and reorder them before
  * transmitting.
  */
 static int
-send_thread(struct rte_ring *ring_in)
+send_thread(struct send_thread_args *args)
 {
 	int ret;
 	unsigned int i, dret;
 	uint16_t nb_dq_mbufs;
 	uint8_t outp;
-	static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
+	unsigned sent;
 	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
 	struct rte_mbuf *rombufs[MAX_PKTS_BURST] = {NULL};
-	struct rte_reorder_buffer *buffer;
+	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
+
+	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__, rte_lcore_id());
+
+	configure_tx_buffers(tx_buffer);
 
-	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
-							rte_lcore_id());
-	buffer = rte_reorder_create("PKT_RO", rte_socket_id(), REORDER_BUFFER_SIZE);
 	while (!quit_signal) {
 
 		/* deque the mbufs from workers_to_tx ring */
-		nb_dq_mbufs = rte_ring_dequeue_burst(ring_in,
-				(void *)mbufs, MAX_PKTS_BURST);
+		nb_dq_mbufs = rte_ring_dequeue_burst(args->ring_in,
+				(void *)mbufs, MAX_PKTS_BURST, NULL);
 
 		if (unlikely(nb_dq_mbufs == 0))
 			continue;
@@ -482,11 +519,12 @@ send_thread(struct rte_ring *ring_in)
 		for (i = 0; i < nb_dq_mbufs; i++) {
 
 			/* send dequeued mbufs for reordering */
-			ret = rte_reorder_insert(buffer, mbufs[i]);
+			ret = rte_reorder_insert(args->buffer, mbufs[i]);
 
 			if (ret == -1 && rte_errno == ERANGE) {
 				/* Too early pkts should be transmitted out directly */
-				LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
+				RTE_LOG_DP(DEBUG, REORDERAPP,
+					"%s():Cannot reorder early packet "
 						"direct enqueuing to TX\n", __func__);
 				outp = mbufs[i]->port;
 				if ((portmask & (1 << outp)) == 0) {
@@ -510,10 +548,10 @@ send_thread(struct rte_ring *ring_in)
 		 * drain MAX_PKTS_BURST of reordered
 		 * mbufs for transmit
 		 */
-		dret = rte_reorder_drain(buffer, rombufs, MAX_PKTS_BURST);
+		dret = rte_reorder_drain(args->buffer, rombufs, MAX_PKTS_BURST);
 		for (i = 0; i < dret; i++) {
 
-			struct output_buffer *outbuf;
+			struct rte_eth_dev_tx_buffer *outbuf;
 			uint8_t outp1;
 
 			outp1 = rombufs[i]->port;
@@ -523,12 +561,15 @@ send_thread(struct rte_ring *ring_in)
 				continue;
 			}
 
-			outbuf = &tx_buffers[outp1];
-			outbuf->mbufs[outbuf->count++] = rombufs[i];
-			if (outbuf->count == MAX_PKTS_BURST)
-				flush_one_port(outbuf, outp1);
+			outbuf = tx_buffer[outp1];
+			sent = rte_eth_tx_buffer(outp1, 0, outbuf, rombufs[i]);
+			if (sent)
+				app_stats.tx.ro_tx_pkts += sent;
 		}
 	}
+
+	free_tx_buffers(tx_buffer);
+
 	return 0;
 }
 
@@ -540,17 +581,21 @@ tx_thread(struct rte_ring *ring_in)
 {
 	uint32_t i, dqnum;
 	uint8_t outp;
-	static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
+	unsigned sent;
 	struct rte_mbuf *mbufs[MAX_PKTS_BURST];
-	struct output_buffer *outbuf;
+	struct rte_eth_dev_tx_buffer *outbuf;
+	static struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
 
 	RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
 							rte_lcore_id());
+
+	configure_tx_buffers(tx_buffer);
+
 	while (!quit_signal) {
 
 		/* deque the mbufs from workers_to_tx ring */
 		dqnum = rte_ring_dequeue_burst(ring_in,
-				(void *)mbufs, MAX_PKTS_BURST);
+				(void *)mbufs, MAX_PKTS_BURST, NULL);
 
 		if (unlikely(dqnum == 0))
 			continue;
@@ -565,10 +610,10 @@
 				continue;
 			}
 
-			outbuf = &tx_buffers[outp];
-			outbuf->mbufs[outbuf->count++] = mbufs[i];
-			if (outbuf->count == MAX_PKTS_BURST)
-				flush_one_port(outbuf, outp);
+			outbuf = tx_buffer[outp];
+			sent = rte_eth_tx_buffer(outp, 0, outbuf, mbufs[i]);
+			if (sent)
+				app_stats.tx.ro_tx_pkts += sent;
 		}
 	}
 
@@ -584,6 +629,7 @@ main(int argc, char **argv)
 	uint8_t port_id;
 	uint8_t nb_ports_available;
 	struct worker_thread_args worker_args = {NULL, NULL};
+	struct send_thread_args send_args = {NULL, NULL};
 	struct rte_ring *rx_to_workers;
 	struct rte_ring *workers_to_tx;
 
@@ -618,12 +664,9 @@
 		rte_exit(EXIT_FAILURE, "Error: number of ports must be even, except "
 				"when using a single port\n");
 
-	mbuf_pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE,
-			MBUF_POOL_CACHE_SIZE,
-			sizeof(struct rte_pktmbuf_pool_private),
-			rte_pktmbuf_pool_init, NULL,
-			rte_pktmbuf_init, NULL,
-			rte_socket_id(), 0);
+	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL,
+			MBUF_POOL_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
+			rte_socket_id());
 	if (mbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
 
@@ -661,6 +704,13 @@
 	if (workers_to_tx == NULL)
 		rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
 
+	if (!disable_reorder) {
+		send_args.buffer = rte_reorder_create("PKT_RO", rte_socket_id(),
+					REORDER_BUFFER_SIZE);
+		if (send_args.buffer == NULL)
+			rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+	}
+
 	last_lcore_id   = get_last_lcore_id();
 	master_lcore_id = rte_get_master_lcore();
 
@@ -673,14 +723,16 @@
 			rte_eal_remote_launch(worker_thread, (void *)&worker_args,
 					lcore_id);
 
-	if (disable_reorder)
+	if (disable_reorder) {
 		/* Start tx_thread() on the last slave core */
 		rte_eal_remote_launch((lcore_function_t *)tx_thread, workers_to_tx,
 				last_lcore_id);
-	else
+	} else {
+		send_args.ring_in = workers_to_tx;
 		/* Start send_thread() on the last slave core */
-		rte_eal_remote_launch((lcore_function_t *)send_thread, workers_to_tx,
-				last_lcore_id);
+		rte_eal_remote_launch((lcore_function_t *)send_thread,
+				(void *)&send_args, last_lcore_id);
+	}
 
 	/* Start rx_thread() on the master core */
 	rx_thread(rx_to_workers);