#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define TX_QUEUE_FLUSH_MASK 0xFFFFFFFF
#define TSC_COUNT_LIMIT 1000
{
uint32_t lcoreid;
struct lcore_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
lcoreid = rte_lcore_id();
qconf = &lcore_conf[lcoreid];
tsc = rte_rdtsc();
diff_tsc = tsc - qconf->tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
nic_tx_flush_queues(qconf);
crypto_flush_tx_queue(lcoreid);
qconf->tsc = tsc;
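
The same conversion repeats in each hunk that follows: instead of hard-coding a cycle count, the drain period is derived once from the measured TSC frequency, rounding the cycles-per-microsecond up so the interval is never shorter than BURST_TX_DRAIN_US. On a 2 GHz TSC this gives (2,000,000,000 + 999,999) / 1,000,000 * 100 = 200,000 cycles, i.e. the value the old BURST_TX_DRAIN constant hard-coded. Below is a minimal sketch of the pattern, not code from any of the patched examples; poll_loop() and flush_tx_queue() are placeholder names.

#include <stdint.h>
#include <rte_cycles.h>             /* rte_get_tsc_hz(), rte_rdtsc(), US_PER_S */
#include <rte_branch_prediction.h>  /* unlikely() */

#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */

static void
flush_tx_queue(void)
{
	/* Placeholder: the real examples rte_eth_tx_burst() the mbufs
	 * buffered for each port (nic_tx_flush_queues() above,
	 * send_burst() in the l2fwd/l3fwd-style hunks below). */
}

static void
poll_loop(void)
{
	/* Round cycles-per-us up so the drain period is at least
	 * BURST_TX_DRAIN_US, whatever the TSC frequency. */
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t prev_tsc = rte_rdtsc();

	for (;;) {
		uint64_t cur_tsc = rte_rdtsc();

		if (unlikely(cur_tsc - prev_tsc > drain_tsc)) {
			flush_tx_queue();
			prev_tsc = cur_tsc;
		}
		/* ... RX burst and packet processing ... */
	}
}
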
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
- uint64_t prev_tsc = 0;
- uint64_t diff_tsc, cur_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
uint8_t portid;
struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
lcore_id = rte_lcore_id();
qconf = &lcore_queue_conf[lcore_id];
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
/*
* This could be optimized (use queueid instead of
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/* Configure how many packets ahead to prefetch, when reading packets */
#define PREFETCH_OFFSET 3
{
uint64_t cur_tsc;
uint8_t portid;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
cur_tsc = rte_rdtsc();
- if (likely (cur_tsc < qconf->tx_tsc + BURST_TX_DRAIN))
+ if (likely(cur_tsc < qconf->tx_tsc + drain_tsc))
return;
for (portid = 0; portid < MAX_PORTS; portid++) {
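
The hunk above shows the other shape this change takes: a flush helper that rate-limits itself by stamping qconf->tx_tsc and returning early until the drain period has elapsed. A minimal sketch of that variant follows, reusing the includes and BURST_TX_DRAIN_US define from the sketch above; MAX_PORTS, the struct layout, and the TX details are placeholders, not the real structures from the patched example.

#define MAX_PORTS 16 /* placeholder; the real example defines its own limit */

/* Hypothetical per-lcore state: only the fields the drain check needs. */
struct lcore_conf {
	uint64_t tx_tsc;             /* TSC stamp of the last actual drain */
	uint16_t tx_len[MAX_PORTS];  /* mbufs currently buffered per port */
};

static void
nic_tx_flush_queues(struct lcore_conf *qconf)
{
	const uint64_t drain_tsc =
		(rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
	uint64_t cur_tsc = rte_rdtsc();
	uint8_t portid;

	/* Called on every loop iteration: return immediately until roughly
	 * BURST_TX_DRAIN_US worth of TSC cycles have passed since the last
	 * drain. */
	if (likely(cur_tsc < qconf->tx_tsc + drain_tsc))
		return;

	for (portid = 0; portid < MAX_PORTS; portid++) {
		if (qconf->tx_len[portid] == 0)
			continue;
		/* rte_eth_tx_burst() the buffered mbufs for this port here. */
		qconf->tx_len[portid] = 0;
	}
	qconf->tx_tsc = cur_tsc;
}
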
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/*
* Configurable number of RX/TX ring descriptors
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *m;
unsigned lcore_id;
- uint64_t prev_tsc = 0;
- uint64_t diff_tsc, cur_tsc, timer_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
unsigned i, j, portid, nb_rx;
struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
timer_tsc = 0;
lcore_id = rte_lcore_id();
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
for (portid = 0; portid < RTE_MAX_ETHPORTS; portid++) {
if (qconf->tx_mbufs[portid].len == 0)
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
- uint64_t prev_tsc = 0;
- uint64_t diff_tsc, cur_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
uint8_t portid, queueid;
struct lcore_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
lcore_id = rte_lcore_id();
qconf = &lcore_conf[lcore_id];
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
/*
* This could be optimized (use queueid instead of
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
- uint64_t prev_tsc = 0;
- uint64_t diff_tsc, cur_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
uint8_t portid, queueid;
struct lcore_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
lcore_id = rte_lcore_id();
qconf = &lcore_conf[lcore_id];
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
/*
* This could be optimized (use queueid instead of
#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
#define MAX_PKT_BURST 32
-#define BURST_TX_DRAIN 200000ULL /* around 100us at 2 Ghz */
+#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
/*
* Configurable number of RX/TX ring descriptors
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *m;
unsigned lcore_id;
- uint64_t prev_tsc = 0;
- uint64_t diff_tsc, cur_tsc, timer_tsc;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, timer_tsc;
unsigned i, j, portid, nb_rx;
struct lcore_queue_conf *qconf;
+ const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
+
+ prev_tsc = 0;
timer_tsc = 0;
lcore_id = rte_lcore_id();
* TX burst queue drain
*/
diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > BURST_TX_DRAIN)) {
+ if (unlikely(diff_tsc > drain_tsc)) {
/* this could be optimized (use queueid instead of
* portid), but it is not called so often */