#ifdef RTE_MBUF_REFCNT_ATOMIC
-static volatile uint32_t refcnt_stop_slaves;
+static volatile uint32_t refcnt_stop_workers;
static unsigned refcnt_lcore[RTE_MAX_LCORE];
#endif
#ifdef RTE_MBUF_REFCNT_ATOMIC
static int
-test_refcnt_slave(void *arg)
+test_refcnt_worker(void *arg)
{
unsigned lcore, free;
void *mp = 0;
printf("%s started at lcore %u\n", __func__, lcore);
free = 0;
- while (refcnt_stop_slaves == 0) {
+ while (refcnt_stop_workers == 0) {
if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
free++;
rte_pktmbuf_free(mp);
/* For each mbuf in the pool:
* - allocate mbuf,
* - increment its reference up to N+1,
- * - enqueue it N times into the ring for slave cores to free.
+ * - enqueue it N times into the ring for worker cores to free.
*/
for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
rte_panic("(lcore=%u, iter=%u): was able to allocate only "
"%u from %u mbufs\n", lcore, iter, i, n);
- /* wait till slave lcores will consume all mbufs */
+ /* wait till worker lcores will consume all mbufs */
while (!rte_ring_empty(refcnt_mbuf_ring))
;
}
static int
-test_refcnt_master(struct rte_mempool *refcnt_pool,
+test_refcnt_main(struct rte_mempool *refcnt_pool,
struct rte_ring *refcnt_mbuf_ring)
{
unsigned i, lcore;
for (i = 0; i != REFCNT_MAX_ITER; i++)
test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
- refcnt_stop_slaves = 1;
+ refcnt_stop_workers = 1;
rte_wmb();
printf("%s finished at lcore %u\n", __func__, lcore);
test_refcnt_mbuf(void)
{
#ifdef RTE_MBUF_REFCNT_ATOMIC
- unsigned int master, slave, tref;
+ unsigned int main_lcore, worker, tref;
int ret = -1;
struct rte_mempool *refcnt_pool = NULL;
struct rte_ring *refcnt_mbuf_ring = NULL;
SOCKET_ID_ANY);
if (refcnt_pool == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
- __func__);
+ __func__);
return -1;
}
refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
- rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
- RING_F_SP_ENQ);
+ rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
+ RING_F_SP_ENQ);
if (refcnt_mbuf_ring == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
- "\n", __func__);
+ "\n", __func__);
goto err;
}
- refcnt_stop_slaves = 0;
+ refcnt_stop_workers = 0;
memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
- rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
- SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
- test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
+ test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
rte_eal_mp_wait_lcore();
/* check that we processed all references */
tref = 0;
- master = rte_get_master_lcore();
+ main_lcore = rte_get_main_lcore();
- RTE_LCORE_FOREACH_SLAVE(slave)
- tref += refcnt_lcore[slave];
+ RTE_LCORE_FOREACH_WORKER(worker)
+ tref += refcnt_lcore[worker];
- if (tref != refcnt_lcore[master])
+ if (tref != refcnt_lcore[main_lcore])
rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
- tref, refcnt_lcore[master]);
+ tref, refcnt_lcore[main_lcore]);
rte_mempool_dump(stdout, refcnt_pool);
rte_ring_dump(stdout, refcnt_mbuf_ring);
VAL_NAME(PKT_RX_FDIR_FLX),
VAL_NAME(PKT_RX_QINQ_STRIPPED),
VAL_NAME(PKT_RX_LRO),
- VAL_NAME(PKT_RX_TIMESTAMP),
VAL_NAME(PKT_RX_SEC_OFFLOAD),
VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
offsetof(struct rte_mbuf, dynfield1[1]));
- if (offset3 != offsetof(struct rte_mbuf, dynfield1[1]))
- GOTO_FAIL("failed to register dynamic field 3, offset=%d: %s",
- offset3, strerror(errno));
+ if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
+ if (rte_errno == EBUSY)
+ printf("mbuf test error skipped: dynfield is busy\n");
+ else
+ GOTO_FAIL("failed to register dynamic field 3, offset="
+ "%d: %s", offset3, strerror(errno));
+ }
printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
offset, offset2, offset3);
flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
rte_bsf64(PKT_LAST_FREE));
if (flag3 != rte_bsf64(PKT_LAST_FREE))
- GOTO_FAIL("failed to register dynamic flag 3, flag2=%d: %s",
+ GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
flag3, strerror(errno));
printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
return -1;
}
+/* check that m->nb_segs and m->next are reset on mbuf free */
+static int
+test_nb_segs_and_next_reset(struct rte_mempool *pktmbuf_pool)
+{
+ struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
+
+ /* alloc mbufs */
+ m0 = rte_pktmbuf_alloc(pktmbuf_pool);
+ m1 = rte_pktmbuf_alloc(pktmbuf_pool);
+ m2 = rte_pktmbuf_alloc(pktmbuf_pool);
+ if (m0 == NULL || m1 == NULL || m2 == NULL)
+ GOTO_FAIL("Failed to allocate mbuf");
+
+ /* append data in all of them */
+ if (rte_pktmbuf_append(m0, 500) == NULL ||
+ rte_pktmbuf_append(m1, 500) == NULL ||
+ rte_pktmbuf_append(m2, 500) == NULL)
+ GOTO_FAIL("Failed to append data in mbuf");
+
+ /* chain them in one mbuf m0; a chain failure is caught by the
+  * nb_segs/next check below, so the return values are not tested here
+  */
+ rte_pktmbuf_chain(m1, m2);
+ rte_pktmbuf_chain(m0, m1);
+ if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
+ m2->next != NULL) {
+ /* linkage state is unknown on failure: clear m1/m2 so the
+  * fail path frees only m0 and cannot double-free chained segs
+  */
+ m1 = m2 = NULL;
+ GOTO_FAIL("Failed to chain mbufs");
+ }
+
+ /* split m0 chain in two, between m1 and m2 */
+ m0->nb_segs = 2;
+ m1->next = NULL;
+ m2->nb_segs = 1;
+
+ /* free the 2 mbuf chains m0 and m2 */
+ rte_pktmbuf_free(m0);
+ rte_pktmbuf_free(m2);
+
+ /* ensure that m->next and m->nb_segs are reset in freed mbufs.
+  * NOTE(review): this reads fields of already-freed mbufs; it is only
+  * valid because nothing reallocates from the pool between the free
+  * and this check in this single-threaded test.
+  */
+ if (m0->nb_segs != 1 || m0->next != NULL ||
+ m1->nb_segs != 1 || m1->next != NULL ||
+ m2->nb_segs != 1 || m2->next != NULL) {
+ /* the mbufs are already freed: clear the pointers so the fail
+  * path does not free them a second time
+  */
+ m0 = m1 = m2 = NULL;
+ GOTO_FAIL("nb_segs or next was not reset properly");
+ }
+
+ return 0;
+
+fail:
+ if (m0 != NULL)
+ rte_pktmbuf_free(m0);
+ if (m1 != NULL)
+ rte_pktmbuf_free(m1);
+ if (m2 != NULL)
+ rte_pktmbuf_free(m2);
+ return -1;
+}
+
static int
test_mbuf(void)
{
goto err;
}
+ /* test reset of m->nb_segs and m->next on mbuf free */
+ if (test_nb_segs_and_next_reset(pktmbuf_pool) < 0) {
+ printf("test_nb_segs_and_next_reset() failed\n");
+ goto err;
+ }
ret = 0;
err: