/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->hash.usr = 0;
rte_distributor_process(d, bufs, BURST);
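/* flush: ensure no packets are left in flight or backlogged inside
 * the distributor before the delivered counts are verified */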
rte_distributor_flush(d);
if (rte_lcore_count() >= 3) {
clear_packet_count();
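/* use only two distinct tag values (0 and 0x100) so the burst
 * should be split between two workers */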
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = (i & 1) << 8;
+ bufs[i]->hash.usr = (i & 1) << 8;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
/* give a different hash value to each packet,
 * so load gets distributed */
clear_packet_count();
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = i;
+ bufs[i]->hash.usr = i;
rte_distributor_process(d, bufs, BURST);
rte_distributor_flush(d);
return -1;
}
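/* give each packet in the big batch its own tag so the flows
 * spread across all workers */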
for (i = 0; i < BIG_BATCH; i++)
- many_bufs[i]->hash.rss = i << 2;
+ many_bufs[i]->hash.usr = i << 2;
for (i = 0; i < BIG_BATCH/BURST; i++) {
rte_distributor_process(d, &many_bufs[i*BURST], BURST);
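/* the pool may be empty while packets are still in flight; calling
 * process with no new mbufs keeps the distributor moving so workers
 * can return buffers to the pool */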
while (rte_mempool_get_bulk(p, (void *)bufs, BURST) < 0)
rte_distributor_process(d, NULL, 0);
for (j = 0; j < BURST; j++) {
- bufs[j]->hash.rss = (i+j) << 1;
+ bufs[j]->hash.usr = (i+j) << 1;
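/* mbufs come straight from rte_mempool_get_bulk(), which does not
 * re-initialise them, so reset the reference count by hand */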
rte_mbuf_refcnt_set(bufs[j], 1);
}
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->hash.usr = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
 * backlog for the other ones at worker 0 */
return -1;
}
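/* queue up a second burst on the same single flow (tag 0) */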
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->hash.usr = 0;
/* get worker zero to quit */
zero_quit = 1;
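/* worker 0 will now return its packets and stop; the distributor is
 * expected to reassign that backlog to the remaining workers */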
/* now set all hash values in all buffers to zero, so all pkts go to the
* one worker thread */
for (i = 0; i < BURST; i++)
- bufs[i]->hash.rss = 0;
+ bufs[i]->hash.usr = 0;
rte_distributor_process(d, bufs, BURST);
/* at this point, we will have processed some packets and have a full
 * backlog for the other ones at worker 0 */
zero_quit = 0;
quit = 1;
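/* send one packet per worker, each with its own tag, so every
 * worker wakes up, sees the quit flag and exits */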
for (i = 0; i < num_workers; i++)
- bufs[i]->hash.rss = i << 1;
+ bufs[i]->hash.usr = i << 1;
rte_distributor_process(d, bufs, num_workers);
rte_mempool_put_bulk(p, (void *)bufs, num_workers);