ring: return free space when enqueuing
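
This blobdiff updates the packet_ordering example for the rte_ring_enqueue_burst() prototype that gained a fourth, optional free_space output parameter; every enqueue call site below simply passes NULL. It also replaces the example's local LOG_DEBUG wrapper with the library's RTE_LOG_DP() data-path logging macro and adjusts the getopt reset in parse_args(). A minimal sketch of how a caller could use the new argument follows (the helper name and the freeing strategy are illustrative, not taken from the patch):

    /*
     * Sketch (not part of the patch): using the free_space output
     * argument of rte_ring_enqueue_burst(). Names are illustrative.
     */
    #include <rte_mbuf.h>
    #include <rte_ring.h>

    static void
    enqueue_sketch(struct rte_ring *r, struct rte_mbuf **pkts, unsigned int n)
    {
            unsigned int free_space;
            unsigned int sent;
            unsigned int i;

            /*
             * The fourth argument, if non-NULL, receives the amount of
             * ring space left after the enqueue; the call sites in this
             * patch pass NULL because they do not need the value.
             */
            sent = rte_ring_enqueue_burst(r, (void *)pkts, n, &free_space);
            if (sent < n) {
                    /* Ring was full: drop the mbufs that did not fit. */
                    for (i = sent; i < n; i++)
                            rte_pktmbuf_free(pkts[i]);
            }
    }
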
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index 15bb900..569b6da 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
 
 #define RING_SIZE 16384
 
-/* uncomment below line to enable debug logs */
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define LOG_LEVEL RTE_LOG_DEBUG
-#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
-#else
-#define LOG_LEVEL RTE_LOG_INFO
-#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
-#endif
-
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_REORDERAPP          RTE_LOGTYPE_USER1
 
@@ -227,7 +216,7 @@ parse_args(int argc, char **argv)
        }
 
        argv[optind-1] = prgname;
-       optind = 0; /* reset getopt lib */
+       optind = 1; /* reset getopt lib */
        return 0;
 }
 
@@ -240,7 +229,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
 
        /* free the mbufs which failed from transmit */
        app_stats.tx.ro_tx_failed_pkts += count;
-       LOG_DEBUG(REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
+       RTE_LOG_DP(DEBUG, REORDERAPP, "%s:Packet loss with tx_burst\n", __func__);
        pktmbuf_free_bulk(unsent, count);
 
 }
@@ -421,7 +410,7 @@ rx_thread(struct rte_ring *ring_out)
                                nb_rx_pkts = rte_eth_rx_burst(port_id, 0,
                                                                pkts, MAX_PKTS_BURST);
                                if (nb_rx_pkts == 0) {
-                                       LOG_DEBUG(REORDERAPP,
+                                       RTE_LOG_DP(DEBUG, REORDERAPP,
                                        "%s():Received zero packets\n", __func__);
                                        continue;
                                }
@@ -432,8 +421,8 @@ rx_thread(struct rte_ring *ring_out)
                                        pkts[i++]->seqn = seqn++;
 
                                /* enqueue to rx_to_workers ring */
-                               ret = rte_ring_enqueue_burst(ring_out, (void *) pkts,
-                                                               nb_rx_pkts);
+                               ret = rte_ring_enqueue_burst(ring_out,
+                                               (void *)pkts, nb_rx_pkts, NULL);
                                app_stats.rx.enqueue_pkts += ret;
                                if (unlikely(ret < nb_rx_pkts)) {
                                        app_stats.rx.enqueue_failed_pkts +=
@@ -484,7 +473,8 @@ worker_thread(void *args_ptr)
                        burst_buffer[i++]->port ^= xor_val;
 
                /* enqueue the modified mbufs to workers_to_tx ring */
-               ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer, burst_size);
+               ret = rte_ring_enqueue_burst(ring_out, (void *)burst_buffer,
+                               burst_size, NULL);
                __sync_fetch_and_add(&app_stats.wkr.enqueue_pkts, ret);
                if (unlikely(ret < burst_size)) {
                        /* Return the mbufs to their respective pool, dropping packets */
@@ -533,7 +523,8 @@ send_thread(struct send_thread_args *args)
 
                        if (ret == -1 && rte_errno == ERANGE) {
                                /* Too early pkts should be transmitted out directly */
-                               LOG_DEBUG(REORDERAPP, "%s():Cannot reorder early packet "
+                               RTE_LOG_DP(DEBUG, REORDERAPP,
+                                               "%s():Cannot reorder early packet "
                                                "direct enqueuing to TX\n", __func__);
                                outp = mbufs[i]->port;
                                if ((portmask & (1 << outp)) == 0) {