return -1;
}
+/* verify rte_pktmbuf_free() resets m->nb_segs to 1 and m->next to NULL */
+static int
+test_nb_segs_and_next_reset(struct rte_mempool *pktmbuf_pool)
+{
+	struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
+
+	/* allocate three mbufs from the pool */
+	m0 = rte_pktmbuf_alloc(pktmbuf_pool);
+	m1 = rte_pktmbuf_alloc(pktmbuf_pool);
+	m2 = rte_pktmbuf_alloc(pktmbuf_pool);
+	if (m0 == NULL || m1 == NULL || m2 == NULL)
+		GOTO_FAIL("Failed to allocate mbuf");
+
+	/* append 500 bytes of data to each mbuf */
+	if (rte_pktmbuf_append(m0, 500) == NULL ||
+			rte_pktmbuf_append(m1, 500) == NULL ||
+			rte_pktmbuf_append(m2, 500) == NULL)
+		GOTO_FAIL("Failed to append data in mbuf");
+
+	/* chain them into one segmented mbuf: m0 -> m1 -> m2 */
+	rte_pktmbuf_chain(m1, m2);
+	rte_pktmbuf_chain(m0, m1);
+	if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
+			m2->next != NULL) {
+		m1 = m2 = NULL;
+		GOTO_FAIL("Failed to chain mbufs");
+	}
+
+	/* manually split the chain between m1 and m2 by patching the fields */
+	m0->nb_segs = 2;
+	m1->next = NULL;
+	m2->nb_segs = 1;
+
+	/* free the two resulting chains: (m0, m1) and (m2) */
+	rte_pktmbuf_free(m0);
+	rte_pktmbuf_free(m2);
+
+	/* freed objects stay readable (mempool memory stays mapped): check reset */
+	if (m0->nb_segs != 1 || m0->next != NULL ||
+			m1->nb_segs != 1 || m1->next != NULL ||
+			m2->nb_segs != 1 || m2->next != NULL) {
+		m0 = m1 = m2 = NULL;
+		GOTO_FAIL("nb_segs or next was not reset properly");
+	}
+
+	return 0;
+
+fail:
+	if (m0 != NULL)
+		rte_pktmbuf_free(m0);
+	if (m1 != NULL)
+		rte_pktmbuf_free(m1);
+	if (m2 != NULL)
+		rte_pktmbuf_free(m2);
+	return -1;
+}
+
static int
test_mbuf(void)
{
goto err;
}
+ /* test reset of m->nb_segs and m->next on mbuf free */
+ if (test_nb_segs_and_next_reset(pktmbuf_pool) < 0) {
+ printf("test_nb_segs_and_next_reset() failed\n");
+ goto err;
+ }
ret = 0;
err:
* or non-atomic) is controlled by the RTE_MBUF_REFCNT_ATOMIC flag.
*/
uint16_t refcnt;
- uint16_t nb_segs; /**< Number of segments. */
+
+ /**
+ * Number of segments. Only valid for the first segment of an mbuf
+ * chain.
+ */
+ uint16_t nb_segs;
/** Input port (16 bits to support more than 256 virtual ports).
* The event eth Tx adapter uses this field to specify the output port.
/* second cache line - fields only used in slow path or on TX */
RTE_MARKER cacheline1 __rte_cache_min_aligned;
- struct rte_mbuf *next; /**< Next segment of scattered packet. */
+ /**
+ * Next segment of scattered packet. Must be NULL in the last segment or
+ * in case of non-segmented packet.
+ */
+ struct rte_mbuf *next;
/* fields to support TX offloads */
RTE_STD_C11