.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
Quota and Watermark Sample Application
======================================
-The Quota and Watermark sample application is a simple example of packet processing using Data Plane Development Kit (DPDK) that
-showcases the use of a quota as the maximum number of packets enqueue/dequeue at a time and low and high watermarks
-to signal low and high ring usage respectively.
+The Quota and Watermark sample application is a simple example of packet
+processing using the Data Plane Development Kit (DPDK) that showcases the
+use of a quota as the maximum number of packets to enqueue or dequeue at a
+time, and of low and high thresholds, or watermarks, to signal low and
+high ring usage respectively.
-Additionally, it shows how ring watermarks can be used to feedback congestion notifications to data producers by
+Additionally, it shows how the thresholds can be used to feed back congestion notifications to data producers by
temporarily stopping the processing of overloaded rings and sending Ethernet flow control frames.
This sample application is split into two parts:
An adjustable quota value controls how many packets are being moved through the pipeline per enqueue and dequeue.
-Adjustable watermark values associated with the rings control a back-off mechanism that
+Adjustable threshold values associated with the rings control a back-off mechanism that
tries to prevent the pipeline from being overloaded by:
* Stopping enqueuing on rings whose usage has crossed the high watermark threshold, as sketched below
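
The following is a minimal sketch of that back-off state machine, not code
taken from the application itself; ring, ring_state, low_watermark,
high_watermark and send_pause_frame() are assumed to exist as in the sample.

.. code-block:: c

    /*
     * Sketch of the back-off logic: an overloaded ring is left alone
     * until its occupancy drains below the low watermark, and enqueuing
     * stops as soon as occupancy crosses the high watermark.
     */
    unsigned int used = rte_ring_count(ring);

    if (ring_state[port_id] == RING_OVERLOADED && used <= *low_watermark)
        ring_state[port_id] = RING_READY;       /* drained: resume */

    if (ring_state[port_id] == RING_READY && used > *high_watermark) {
        ring_state[port_id] = RING_OVERLOADED;  /* stop enqueuing */
        send_pause_frame(port_id, 1337);        /* ask the sender to pause */
    }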
Shared Variables Setup
^^^^^^^^^^^^^^^^^^^^^^
-The quota and low_watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
+The quota and the low and high watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
.. code-block:: c
void
setup_shared_variables(void)
{
- const struct rte_memzone *qw_memzone;
-
- qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
+ const struct rte_memzone *qw_memzone;
- if (qw_memzone == NULL)
- rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+ qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+ 3 * sizeof(int), rte_socket_id(), 0);
+ if (qw_memzone == NULL)
+ rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
- quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
- }
+ quota = qw_memzone->addr;
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
+ }
-These two variables are initialized to a default value in main() and
+These three variables are initialized to a default value in main() and
can be changed while qw is running using the qwctl control program.
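
Because the variables live in a named memzone, a secondary process such as
qwctl can look the zone up and overlay the same three integers on it. Note
that the cast binds before the addition, so + 1 and + 2 step one and two
unsigned ints past the base address, which is why the older + sizeof(int)
arithmetic overshot. A minimal sketch of the lookup side, assuming the same
variable names as the application:

.. code-block:: c

    /*
     * Sketch (not verbatim qwctl code): find the memzone created by
     * the qw process and map quota and the two watermarks onto it.
     */
    const struct rte_memzone *qw_memzone;

    qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
    if (qw_memzone == NULL)
        rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");

    quota = qw_memzone->addr;
    low_watermark = (unsigned int *) qw_memzone->addr + 1;
    high_watermark = (unsigned int *) qw_memzone->addr + 2;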
Application Arguments
/* Process each port round robin style */
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- ring = rings[lcore_id][port_id];
-
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(ring) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Enqueue received packets on the RX ring */
-
- nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, *quota);
-
- ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
- if (ret == -EDQUOT) {
- ring_state[port_id] = RING_OVERLOADED;
- send_pause_frame(port_id, 1337);
- }
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ ring = rings[lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(ring) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Enqueue received packets on the RX ring */
+ nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+ (uint16_t) *quota);
+ ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
+ ring_state[port_id] = RING_OVERLOADED;
+ send_pause_frame(port_id, 1337);
+ }
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_rx_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}
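
A note on the enqueue call above: since the ring library rework,
rte_ring_enqueue_bulk() is all-or-nothing and returns the number of objects
enqueued (nb_rx_pkts on success, 0 on failure) instead of the old
0/-EDQUOT/-ENOBUFS codes, and its last argument, if non-NULL, receives the
ring's free space after the call. A condensed sketch of that contract:

.. code-block:: c

    /* Sketch of the bulk enqueue contract used above (illustrative) */
    unsigned int free;
    unsigned int ret;

    ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts, &free);
    /* ret is nb_rx_pkts (all enqueued) or 0 (none); never partial.  */
    /* The application uses RING_SIZE - free as the ring occupancy   */
    /* right after the call.                                         */

Note that the occupancy test runs regardless of ret: even a successful
enqueue can push the ring past the high watermark, and a failed one leaves
the ring as full as it already was, so the pause frame is warranted either
way.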
For each port in the port mask, the corresponding ring's pointer is fetched into ring and that ring's state is checked:
previous_lcore_id = get_previous_lcore_id(lcore_id);
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
- if (!is_bit_set(port_id, portmask))
- continue;
-
- tx = rings[lcore_id][port_id];
- rx = rings[previous_lcore_id][port_id];
- if (ring_state[port_id] != RING_READY) {
- if (rte_ring_count(tx) > *low_watermark)
- continue;
- else
- ring_state[port_id] = RING_READY;
- }
-
- /* Dequeue up to quota mbuf from rx */
-
- nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
-
- if (unlikely(nb_dq_pkts < 0))
- continue;
-
- /* Enqueue them on tx */
-
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
- ring_state[port_id] = RING_OVERLOADED;
+ if (!is_bit_set(port_id, portmask))
+ continue;
+
+ tx = rings[lcore_id][port_id];
+ rx = rings[previous_lcore_id][port_id];
+
+ if (ring_state[port_id] != RING_READY) {
+ if (rte_ring_count(tx) > *low_watermark)
+ continue;
+ else
+ ring_state[port_id] = RING_READY;
+ }
+
+ /* Dequeue up to quota mbufs from rx */
+ nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+ *quota, NULL);
+ if (unlikely(nb_dq_pkts == 0))
+ continue;
+
+ /* Enqueue them on tx */
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
+ ring_state[port_id] = RING_OVERLOADED;
+
+ if (ret == 0) {
+
+ /*
+ * Return mbufs to the pool,
+ * effectively dropping packets
+ */
+ for (i = 0; i < nb_dq_pkts; i++)
+ rte_pktmbuf_free(pkts[i]);
+ }
}
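
The dequeue side uses the burst variant, whose contract differs from the
bulk enqueue: it returns however many objects it could take, anywhere from
zero up to the requested *quota, and its last argument, NULL in the
application, can instead receive the number of entries still left in the
ring. A sketch using a hypothetical avail variable:

.. code-block:: c

    /*
     * Sketch of the burst dequeue contract (illustrative): up to *quota
     * mbufs are taken from rx and the count actually obtained, possibly
     * zero, is returned; avail receives how many entries remain.
     */
    unsigned int avail;
    unsigned int nb_dq_pkts;

    nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota, &avail);
    if (nb_dq_pkts == 0) {
        /* the rx ring was empty: nothing to forward this round */
    }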
The thread's logic works mostly like receive_stage(),
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
int *quota;
unsigned int *low_watermark;
+unsigned int *high_watermark;
uint8_t port_pairs[RTE_MAX_ETHPORTS];
uint16_t nb_rx_pkts;
unsigned int lcore_id;
+ unsigned int free;
struct rte_mbuf *pkts[MAX_PKT_QUOTA];
struct rte_ring *ring;
nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
(uint16_t) *quota);
ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
- nb_rx_pkts);
- if (ret == -EDQUOT) {
+ nb_rx_pkts, &free);
+ if (RING_SIZE - free > *high_watermark) {
ring_state[port_id] = RING_OVERLOADED;
send_pause_frame(port_id, 1337);
}
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
/*
* Return mbufs to the pool,
uint8_t port_id;
unsigned int lcore_id, previous_lcore_id;
+ unsigned int free;
void *pkts[MAX_PKT_QUOTA];
struct rte_ring *rx, *tx;
continue;
/* Enqueue them on tx */
- ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
- if (ret == -EDQUOT)
+ ret = rte_ring_enqueue_bulk(tx, pkts,
+ nb_dq_pkts, &free);
+ if (RING_SIZE - free > *high_watermark)
ring_state[port_id] = RING_OVERLOADED;
- else if (ret == -ENOBUFS) {
+ if (ret == 0) {
/*
* Return mbufs to the pool,