* Copyright(c) 2010-2014 Intel Corporation
*/
+#include "test.h"
+
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
-#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_tcp.h>
#include <rte_mbuf_dyn.h>
-#include "test.h"
-
#define MEMPOOL_CACHE_SIZE 32
#define MBUF_DATA_SIZE 2048
#define NB_MBUF 128
#ifdef RTE_MBUF_REFCNT_ATOMIC
-static volatile uint32_t refcnt_stop_slaves;
+static volatile uint32_t refcnt_stop_workers;
static unsigned refcnt_lcore[RTE_MAX_LCORE];
#endif
return 0;
fail:
- if (m)
- rte_pktmbuf_free(m);
+ rte_pktmbuf_free(m);
return -1;
}
+/* Read the reference count that is relevant for clone tests: mbufs with
+ * pinned external buffers keep the count in the shared info, regular
+ * mbufs keep it in the mbuf itself.
+ */
+static uint16_t
+testclone_refcnt_read(struct rte_mbuf *m)
+{
+	return RTE_MBUF_HAS_PINNED_EXTBUF(m) ?
+	       rte_mbuf_ext_refcnt_read(m->shinfo) :
+	       rte_mbuf_refcnt_read(m);
+}
+
static int
-testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool)
+testclone_testupdate_testdetach(struct rte_mempool *pktmbuf_pool,
+ struct rte_mempool *clone_pool)
{
struct rte_mbuf *m = NULL;
struct rte_mbuf *clone = NULL;
*data = MAGIC_DATA;
/* clone the allocated mbuf */
- clone = rte_pktmbuf_clone(m, pktmbuf_pool);
+ clone = rte_pktmbuf_clone(m, clone_pool);
if (clone == NULL)
GOTO_FAIL("cannot clone data\n");
if (*data != MAGIC_DATA)
GOTO_FAIL("invalid data in clone\n");
- if (rte_mbuf_refcnt_read(m) != 2)
+ if (testclone_refcnt_read(m) != 2)
GOTO_FAIL("invalid refcnt in m\n");
/* free the clone */
data = rte_pktmbuf_mtod(m->next, unaligned_uint32_t *);
*data = MAGIC_DATA;
- clone = rte_pktmbuf_clone(m, pktmbuf_pool);
+ clone = rte_pktmbuf_clone(m, clone_pool);
if (clone == NULL)
GOTO_FAIL("cannot clone data\n");
if (*data != MAGIC_DATA)
GOTO_FAIL("invalid data in clone->next\n");
- if (rte_mbuf_refcnt_read(m) != 2)
+ if (testclone_refcnt_read(m) != 2)
GOTO_FAIL("invalid refcnt in m\n");
- if (rte_mbuf_refcnt_read(m->next) != 2)
+ if (testclone_refcnt_read(m->next) != 2)
GOTO_FAIL("invalid refcnt in m->next\n");
/* try to clone the clone */
- clone2 = rte_pktmbuf_clone(clone, pktmbuf_pool);
+ clone2 = rte_pktmbuf_clone(clone, clone_pool);
if (clone2 == NULL)
GOTO_FAIL("cannot clone the clone\n");
if (*data != MAGIC_DATA)
GOTO_FAIL("invalid data in clone2->next\n");
- if (rte_mbuf_refcnt_read(m) != 3)
+ if (testclone_refcnt_read(m) != 3)
GOTO_FAIL("invalid refcnt in m\n");
- if (rte_mbuf_refcnt_read(m->next) != 3)
+ if (testclone_refcnt_read(m->next) != 3)
GOTO_FAIL("invalid refcnt in m->next\n");
/* free mbuf */
return 0;
fail:
- if (m)
- rte_pktmbuf_free(m);
- if (clone)
- rte_pktmbuf_free(clone);
- if (clone2)
- rte_pktmbuf_free(clone2);
+ rte_pktmbuf_free(m);
+ rte_pktmbuf_free(clone);
+ rte_pktmbuf_free(clone2);
return -1;
}
static int
-test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool)
+test_pktmbuf_copy(struct rte_mempool *pktmbuf_pool,
+ struct rte_mempool *clone_pool)
{
struct rte_mbuf *m = NULL;
struct rte_mbuf *copy = NULL;
copy = NULL;
/* same test with a cloned mbuf */
- clone = rte_pktmbuf_clone(m, pktmbuf_pool);
+ clone = rte_pktmbuf_clone(m, clone_pool);
if (clone == NULL)
GOTO_FAIL("cannot clone data\n");
- if (!RTE_MBUF_CLONED(clone))
+ if ((!RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
+ !RTE_MBUF_CLONED(clone)) ||
+ (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
+ !RTE_MBUF_HAS_EXTBUF(clone)))
GOTO_FAIL("clone did not give a cloned mbuf\n");
copy = rte_pktmbuf_copy(clone, pktmbuf_pool, 0, UINT32_MAX);
return 0;
fail:
- if (m)
- rte_pktmbuf_free(m);
- if (copy)
- rte_pktmbuf_free(copy);
- if (copy2)
- rte_pktmbuf_free(copy2);
+ rte_pktmbuf_free(m);
+ rte_pktmbuf_free(copy);
+ rte_pktmbuf_free(copy2);
return -1;
}
return 0;
fail:
- if (m)
- rte_pktmbuf_free(m);
- if (clone)
- rte_pktmbuf_free(clone);
- if (clone2)
- rte_pktmbuf_free(clone2);
+ rte_pktmbuf_free(m);
+ rte_pktmbuf_free(clone);
+ rte_pktmbuf_free(clone2);
return -1;
}
}
/* free them */
for (i=0; i<NB_MBUF; i++) {
- if (m[i] != NULL)
- rte_pktmbuf_free(m[i]);
+ rte_pktmbuf_free(m[i]);
}
return ret;
/* free them */
for (i=0; i<NB_MBUF; i++) {
- if (m[i] != NULL)
- rte_pktmbuf_free(m[i]);
+ rte_pktmbuf_free(m[i]);
}
for (i=0; i<NB_MBUF; i++)
/* free them */
for (i=0; i<NB_MBUF; i++) {
- if (m[i] != NULL)
- rte_pktmbuf_free(m[i]);
+ rte_pktmbuf_free(m[i]);
}
return ret;
#ifdef RTE_MBUF_REFCNT_ATOMIC
static int
-test_refcnt_slave(void *arg)
+test_refcnt_worker(void *arg)
{
unsigned lcore, free;
void *mp = 0;
printf("%s started at lcore %u\n", __func__, lcore);
free = 0;
- while (refcnt_stop_slaves == 0) {
+ while (refcnt_stop_workers == 0) {
if (rte_ring_dequeue(refcnt_mbuf_ring, &mp) == 0) {
free++;
rte_pktmbuf_free(mp);
/* For each mbuf in the pool:
* - allocate mbuf,
* - increment it's reference up to N+1,
- * - enqueue it N times into the ring for slave cores to free.
+ * - enqueue it N times into the ring for worker cores to free.
*/
for (i = 0, n = rte_mempool_avail_count(refcnt_pool);
i != n && (m = rte_pktmbuf_alloc(refcnt_pool)) != NULL;
rte_panic("(lcore=%u, iter=%u): was able to allocate only "
"%u from %u mbufs\n", lcore, iter, i, n);
- /* wait till slave lcores will consume all mbufs */
+ /* wait till worker lcores will consume all mbufs */
while (!rte_ring_empty(refcnt_mbuf_ring))
;
}
static int
-test_refcnt_master(struct rte_mempool *refcnt_pool,
+test_refcnt_main(struct rte_mempool *refcnt_pool,
struct rte_ring *refcnt_mbuf_ring)
{
unsigned i, lcore;
for (i = 0; i != REFCNT_MAX_ITER; i++)
test_refcnt_iter(lcore, i, refcnt_pool, refcnt_mbuf_ring);
- refcnt_stop_slaves = 1;
+ refcnt_stop_workers = 1;
rte_wmb();
printf("%s finished at lcore %u\n", __func__, lcore);
test_refcnt_mbuf(void)
{
#ifdef RTE_MBUF_REFCNT_ATOMIC
- unsigned int master, slave, tref;
+ unsigned int main_lcore, worker, tref;
int ret = -1;
struct rte_mempool *refcnt_pool = NULL;
struct rte_ring *refcnt_mbuf_ring = NULL;
SOCKET_ID_ANY);
if (refcnt_pool == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_pool) "\n",
- __func__);
+ __func__);
return -1;
}
refcnt_mbuf_ring = rte_ring_create("refcnt_mbuf_ring",
- rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
- RING_F_SP_ENQ);
+ rte_align32pow2(REFCNT_RING_SIZE), SOCKET_ID_ANY,
+ RING_F_SP_ENQ);
if (refcnt_mbuf_ring == NULL) {
printf("%s: cannot allocate " MAKE_STRING(refcnt_mbuf_ring)
- "\n", __func__);
+ "\n", __func__);
goto err;
}
- refcnt_stop_slaves = 0;
+ refcnt_stop_workers = 0;
memset(refcnt_lcore, 0, sizeof (refcnt_lcore));
- rte_eal_mp_remote_launch(test_refcnt_slave, refcnt_mbuf_ring,
- SKIP_MASTER);
+ rte_eal_mp_remote_launch(test_refcnt_worker, refcnt_mbuf_ring, SKIP_MAIN);
- test_refcnt_master(refcnt_pool, refcnt_mbuf_ring);
+ test_refcnt_main(refcnt_pool, refcnt_mbuf_ring);
rte_eal_mp_wait_lcore();
- /* check that we porcessed all references */
+ /* check that we processed all references */
tref = 0;
- master = rte_get_master_lcore();
+ main_lcore = rte_get_main_lcore();
- RTE_LCORE_FOREACH_SLAVE(slave)
- tref += refcnt_lcore[slave];
+ RTE_LCORE_FOREACH_WORKER(worker)
+ tref += refcnt_lcore[worker];
- if (tref != refcnt_lcore[master])
- rte_panic("refernced mbufs: %u, freed mbufs: %u\n",
- tref, refcnt_lcore[master]);
+ if (tref != refcnt_lcore[main_lcore])
+ rte_panic("referenced mbufs: %u, freed mbufs: %u\n",
+ tref, refcnt_lcore[main_lcore]);
rte_mempool_dump(stdout, refcnt_pool);
rte_ring_dump(stdout, refcnt_mbuf_ring);
#endif
}
+#ifdef RTE_EXEC_ENV_WINDOWS
+/* Windows stub: the real test relies on fork() to catch the expected
+ * panic from rte_mbuf_sanity_check(), which is unavailable there, so
+ * report the test as skipped.
+ */
+static int
+test_failing_mbuf_sanity_check(struct rte_mempool *pktmbuf_pool)
+{
+	RTE_SET_USED(pktmbuf_pool);
+	return TEST_SKIPPED;
+}
+#else
+
#include <unistd.h>
+#include <sys/resource.h>
+#include <sys/time.h>
#include <sys/wait.h>
/* use fork() to test mbuf errors panic */
pid = fork();
if (pid == 0) {
+ struct rlimit rl;
+
+ /* No need to generate a coredump when panicking. */
+ rl.rlim_cur = rl.rlim_max = 0;
+ setrlimit(RLIMIT_CORE, &rl);
rte_mbuf_sanity_check(buf, 1); /* should panic */
exit(0); /* return normally if it doesn't panic */
- } else if (pid < 0){
+ } else if (pid < 0) {
printf("Fork Failed\n");
return -1;
}
buf = rte_pktmbuf_alloc(pktmbuf_pool);
if (buf == NULL)
return -1;
+
printf("Checking good mbuf initially\n");
if (verify_mbuf_check_panics(buf) != -1)
return -1;
return 0;
}
+#endif /* !RTE_EXEC_ENV_WINDOWS */
+
static int
test_mbuf_linearize(struct rte_mempool *pktmbuf_pool, int pkt_len,
int nb_segs)
return 0;
fail:
- if (mbuf)
- rte_pktmbuf_free(mbuf);
+ rte_pktmbuf_free(mbuf);
return -1;
}
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
- ret = rte_get_rx_ol_flag_list(PKT_RX_L4_CKSUM_MASK, buf, 0);
+ ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_L4_CKSUM_MASK, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
- ret = rte_get_rx_ol_flag_list(PKT_RX_SEC_OFFLOAD, buf, sizeof(buf));
+ ret = rte_get_rx_ol_flag_list(RTE_MBUF_F_RX_SEC_OFFLOAD, buf,
+ sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
/* Test case to check with zero buffer len */
- ret = rte_get_tx_ol_flag_list(PKT_TX_IP_CKSUM, buf, 0);
+ ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_IP_CKSUM, buf, 0);
if (ret != -1)
GOTO_FAIL("%s expected: -1, received = %d\n", __func__, ret);
"non-zero, buffer should not be empty");
/* Test case to check with valid mask value */
- ret = rte_get_tx_ol_flag_list(PKT_TX_UDP_CKSUM, buf, sizeof(buf));
+ ret = rte_get_tx_ol_flag_list(RTE_MBUF_F_TX_UDP_CKSUM, buf,
+ sizeof(buf));
if (ret != 0)
GOTO_FAIL("%s expected: 0, received = %d\n", __func__, ret);
uint16_t i;
const char *flag_str = NULL;
const struct flag_name rx_flags[] = {
- VAL_NAME(PKT_RX_VLAN),
- VAL_NAME(PKT_RX_RSS_HASH),
- VAL_NAME(PKT_RX_FDIR),
- VAL_NAME(PKT_RX_L4_CKSUM_BAD),
- VAL_NAME(PKT_RX_L4_CKSUM_GOOD),
- VAL_NAME(PKT_RX_L4_CKSUM_NONE),
- VAL_NAME(PKT_RX_IP_CKSUM_BAD),
- VAL_NAME(PKT_RX_IP_CKSUM_GOOD),
- VAL_NAME(PKT_RX_IP_CKSUM_NONE),
- VAL_NAME(PKT_RX_EIP_CKSUM_BAD),
- VAL_NAME(PKT_RX_VLAN_STRIPPED),
- VAL_NAME(PKT_RX_IEEE1588_PTP),
- VAL_NAME(PKT_RX_IEEE1588_TMST),
- VAL_NAME(PKT_RX_FDIR_ID),
- VAL_NAME(PKT_RX_FDIR_FLX),
- VAL_NAME(PKT_RX_QINQ_STRIPPED),
- VAL_NAME(PKT_RX_LRO),
- VAL_NAME(PKT_RX_TIMESTAMP),
- VAL_NAME(PKT_RX_SEC_OFFLOAD),
- VAL_NAME(PKT_RX_SEC_OFFLOAD_FAILED),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_BAD),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_GOOD),
- VAL_NAME(PKT_RX_OUTER_L4_CKSUM_INVALID),
+ VAL_NAME(RTE_MBUF_F_RX_VLAN),
+ VAL_NAME(RTE_MBUF_F_RX_RSS_HASH),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_L4_CKSUM_NONE),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_IP_CKSUM_NONE),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_VLAN_STRIPPED),
+ VAL_NAME(RTE_MBUF_F_RX_IEEE1588_PTP),
+ VAL_NAME(RTE_MBUF_F_RX_IEEE1588_TMST),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR_ID),
+ VAL_NAME(RTE_MBUF_F_RX_FDIR_FLX),
+ VAL_NAME(RTE_MBUF_F_RX_QINQ_STRIPPED),
+ VAL_NAME(RTE_MBUF_F_RX_LRO),
+ VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD),
+ VAL_NAME(RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD),
+ VAL_NAME(RTE_MBUF_F_RX_OUTER_L4_CKSUM_INVALID),
};
/* Test case to check with valid flag */
uint16_t i;
const char *flag_str = NULL;
const struct flag_name tx_flags[] = {
- VAL_NAME(PKT_TX_VLAN),
- VAL_NAME(PKT_TX_IP_CKSUM),
- VAL_NAME(PKT_TX_TCP_CKSUM),
- VAL_NAME(PKT_TX_SCTP_CKSUM),
- VAL_NAME(PKT_TX_UDP_CKSUM),
- VAL_NAME(PKT_TX_IEEE1588_TMST),
- VAL_NAME(PKT_TX_TCP_SEG),
- VAL_NAME(PKT_TX_IPV4),
- VAL_NAME(PKT_TX_IPV6),
- VAL_NAME(PKT_TX_OUTER_IP_CKSUM),
- VAL_NAME(PKT_TX_OUTER_IPV4),
- VAL_NAME(PKT_TX_OUTER_IPV6),
- VAL_NAME(PKT_TX_TUNNEL_VXLAN),
- VAL_NAME(PKT_TX_TUNNEL_GRE),
- VAL_NAME(PKT_TX_TUNNEL_IPIP),
- VAL_NAME(PKT_TX_TUNNEL_GENEVE),
- VAL_NAME(PKT_TX_TUNNEL_MPLSINUDP),
- VAL_NAME(PKT_TX_TUNNEL_VXLAN_GPE),
- VAL_NAME(PKT_TX_TUNNEL_IP),
- VAL_NAME(PKT_TX_TUNNEL_UDP),
- VAL_NAME(PKT_TX_QINQ),
- VAL_NAME(PKT_TX_MACSEC),
- VAL_NAME(PKT_TX_SEC_OFFLOAD),
- VAL_NAME(PKT_TX_UDP_SEG),
- VAL_NAME(PKT_TX_OUTER_UDP_CKSUM),
- VAL_NAME(PKT_TX_METADATA),
+ VAL_NAME(RTE_MBUF_F_TX_VLAN),
+ VAL_NAME(RTE_MBUF_F_TX_IP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_TCP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_SCTP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_UDP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_IEEE1588_TMST),
+ VAL_NAME(RTE_MBUF_F_TX_TCP_SEG),
+ VAL_NAME(RTE_MBUF_F_TX_IPV4),
+ VAL_NAME(RTE_MBUF_F_TX_IPV6),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IP_CKSUM),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV4),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_IPV6),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GRE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IPIP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_GENEVE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_MPLSINUDP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_IP),
+ VAL_NAME(RTE_MBUF_F_TX_TUNNEL_UDP),
+ VAL_NAME(RTE_MBUF_F_TX_QINQ),
+ VAL_NAME(RTE_MBUF_F_TX_MACSEC),
+ VAL_NAME(RTE_MBUF_F_TX_SEC_OFFLOAD),
+ VAL_NAME(RTE_MBUF_F_TX_UDP_SEG),
+ VAL_NAME(RTE_MBUF_F_TX_OUTER_UDP_CKSUM),
};
/* Test case to check with valid flag */
/* test to validate if IP checksum is counted only for IPV4 packet */
/* set both IP checksum and IPV6 flags */
- ol_flags |= PKT_TX_IP_CKSUM;
- ol_flags |= PKT_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_CKSUM_IPV6_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
ol_flags = 0;
/* test to validate if IP type is set when required */
- ol_flags |= PKT_TX_L4_MASK;
+ ol_flags |= RTE_MBUF_F_TX_L4_MASK;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: IP type is not set.\n", __func__);
/* test if IP type is set when TCP SEG is on */
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_NOT_SET",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
ol_flags = 0;
/* test to confirm IP type (IPV4/IPV6) is set */
- ol_flags = PKT_TX_L4_MASK;
- ol_flags |= PKT_TX_IPV6;
+ ol_flags = RTE_MBUF_F_TX_L4_MASK;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
if (test_mbuf_validate_tx_offload("MBUF_TEST_IP_TYPE_SET",
pktmbuf_pool,
ol_flags, 0, 0) < 0)
ol_flags = 0;
/* test to check TSO segment size is non-zero */
- ol_flags |= PKT_TX_IPV4;
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
/* set 0 tso segment size */
if (test_mbuf_validate_tx_offload("MBUF_TEST_NULL_TSO_SEGSZ",
pktmbuf_pool,
ol_flags, 0, -EINVAL) < 0)
GOTO_FAIL("%s failed: tso segment size is null.\n", __func__);
- /* retain IPV4 and PKT_TX_TCP_SEG mask */
+ /* retain IPV4 and RTE_MBUF_F_TX_TCP_SEG mask */
/* set valid tso segment size but IP CKSUM not set */
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_NOT_SET",
pktmbuf_pool,
/* test to validate if IP checksum is set for TSO capability */
/* retain IPV4, TCP_SEG, tso_seg size */
- ol_flags |= PKT_TX_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IP_CKSUM_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
/* test to confirm TSO for IPV6 type */
ol_flags = 0;
- ol_flags |= PKT_TX_IPV6;
- ol_flags |= PKT_TX_TCP_SEG;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
if (test_mbuf_validate_tx_offload("MBUF_TEST_TSO_IPV6_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
ol_flags = 0;
/* test if outer IP checksum set for non outer IPv4 packet */
- ol_flags |= PKT_TX_IPV6;
- ol_flags |= PKT_TX_OUTER_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_NOT_SET",
pktmbuf_pool,
ol_flags, 512, -EINVAL) < 0)
ol_flags = 0;
/* test to confirm outer IP checksum is set for outer IPV4 packet */
- ol_flags |= PKT_TX_OUTER_IP_CKSUM;
- ol_flags |= PKT_TX_OUTER_IPV4;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IP_CKSUM;
+ ol_flags |= RTE_MBUF_F_TX_OUTER_IPV4;
if (test_mbuf_validate_tx_offload("MBUF_TEST_OUTER_IPV4_SET",
pktmbuf_pool,
ol_flags, 512, 0) < 0)
NULL);
if (data_copy == NULL)
GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
- if (strlen(data_copy) != MBUF_TEST_DATA_LEN2 - 5)
- GOTO_FAIL("%s: Incorrect data length!\n", __func__);
for (off = 0; off < MBUF_TEST_DATA_LEN2 - 5; off++) {
if (data_copy[off] != (char)0xcc)
GOTO_FAIL("Data corrupted at offset %u", off);
data_copy = rte_pktmbuf_read(m, hdr_len, 0, NULL);
if (data_copy == NULL)
GOTO_FAIL("%s: Error in reading packet data!\n", __func__);
- if (strlen(data_copy) != MBUF_TEST_DATA_LEN2)
- GOTO_FAIL("%s: Corrupted data content!\n", __func__);
for (off = 0; off < MBUF_TEST_DATA_LEN2; off++) {
if (data_copy[off] != (char)0xcc)
GOTO_FAIL("Data corrupted at offset %u", off);
/* Define a free call back function to be used for external buffer */
static void
-ext_buf_free_callback_fn(void *addr __rte_unused, void *opaque)
+ext_buf_free_callback_fn(void *addr, void *opaque)
{
- void *ext_buf_addr = opaque;
+ bool *freed = opaque;
- if (ext_buf_addr == NULL) {
+ if (addr == NULL) {
printf("External buffer address is invalid\n");
return;
}
- rte_free(ext_buf_addr);
- ext_buf_addr = NULL;
+ rte_free(addr);
+ *freed = true;
printf("External buffer freed via callback\n");
}
void *ext_buf_addr = NULL;
uint16_t buf_len = EXT_BUF_TEST_DATA_LEN +
sizeof(struct rte_mbuf_ext_shared_info);
+ bool freed = false;
/* alloc a mbuf */
m = rte_pktmbuf_alloc(pktmbuf_pool);
GOTO_FAIL("%s: External buffer allocation failed\n", __func__);
ret_shinfo = rte_pktmbuf_ext_shinfo_init_helper(ext_buf_addr, &buf_len,
- ext_buf_free_callback_fn, ext_buf_addr);
+ ext_buf_free_callback_fn, &freed);
if (ret_shinfo == NULL)
GOTO_FAIL("%s: Shared info initialization failed!\n", __func__);
if (rte_mbuf_refcnt_read(m) != 1)
GOTO_FAIL("%s: Invalid refcnt in mbuf\n", __func__);
- buf_iova = rte_mempool_virt2iova(ext_buf_addr);
+ buf_iova = rte_mem_virt2iova(ext_buf_addr);
rte_pktmbuf_attach_extbuf(m, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
- if (m->ol_flags != EXT_ATTACHED_MBUF)
+ if (m->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
/* attach the same external buffer to the cloned mbuf */
rte_pktmbuf_attach_extbuf(clone, ext_buf_addr, buf_iova, buf_len,
ret_shinfo);
- if (clone->ol_flags != EXT_ATTACHED_MBUF)
+ if (clone->ol_flags != RTE_MBUF_F_EXTERNAL)
GOTO_FAIL("%s: External buffer is not attached to mbuf\n",
__func__);
if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
+ if (freed)
+ GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
/* test to manually update ext_buf_ref_cnt from 2 to 3*/
rte_mbuf_ext_refcnt_update(ret_shinfo, 1);
if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 3)
GOTO_FAIL("%s: Update ext_buf ref_cnt failed\n", __func__);
+ if (freed)
+ GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
/* reset the ext_refcnt before freeing the external buffer */
rte_mbuf_ext_refcnt_set(ret_shinfo, 2);
if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 2)
GOTO_FAIL("%s: set ext_buf ref_cnt failed\n", __func__);
+ if (freed)
+ GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
/* detach the external buffer from mbufs */
rte_pktmbuf_detach_extbuf(m);
/* check if ref cnt is decremented */
if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 1)
GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
+ if (freed)
+ GOTO_FAIL("%s: extbuf should not be freed\n", __func__);
rte_pktmbuf_detach_extbuf(clone);
- if (rte_mbuf_ext_refcnt_read(ret_shinfo) != 0)
- GOTO_FAIL("%s: Invalid ext_buf ref_cnt\n", __func__);
+ if (!freed)
+ GOTO_FAIL("%s: extbuf should be freed\n", __func__);
+ freed = false;
rte_pktmbuf_free(m);
m = NULL;
return -1;
}
+/*
+ * Test the mbuf pool with pinned external data buffers
+ * - Allocate memory zone for external buffer
+ * - Create the mbuf pool with pinned external buffer
+ * - Run the relevant mbuf pool unit tests against the created pool;
+ *   std_pool supplies regular mbufs for the clone/copy tests.
+ * Returns 0 on success, -1 on failure.
+ */
+static int
+test_pktmbuf_ext_pinned_buffer(struct rte_mempool *std_pool)
+{
+	struct rte_pktmbuf_extmem ext_mem;
+	struct rte_mempool *pinned_pool = NULL;
+	const struct rte_memzone *mz = NULL;
+
+	printf("Test mbuf pool with external pinned data buffers\n");
+
+	/* Allocate memzone for the external data buffer */
+	mz = rte_memzone_reserve("pinned_pool",
+				 NB_MBUF * MBUF_DATA_SIZE,
+				 SOCKET_ID_ANY,
+				 RTE_MEMZONE_2MB | RTE_MEMZONE_SIZE_HINT_ONLY);
+	if (mz == NULL)
+		GOTO_FAIL("%s: Memzone allocation failed\n", __func__);
+
+	/* Create the mbuf pool with pinned external data buffer */
+	ext_mem.buf_ptr = mz->addr;
+	ext_mem.buf_iova = mz->iova;
+	ext_mem.buf_len = mz->len;
+	ext_mem.elt_size = MBUF_DATA_SIZE;
+
+	pinned_pool = rte_pktmbuf_pool_create_extbuf("test_pinned_pool",
+			NB_MBUF, MEMPOOL_CACHE_SIZE, 0,
+			MBUF_DATA_SIZE,	SOCKET_ID_ANY,
+			&ext_mem, 1);
+	if (pinned_pool == NULL)
+		GOTO_FAIL("%s: Mbuf pool with pinned external"
+			  " buffer creation failed\n", __func__);
+	/* test multiple mbuf alloc */
+	if (test_pktmbuf_pool(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed\n",
+			  __func__);
+
+	/* do it another time to check that all mbufs were freed */
+	if (test_pktmbuf_pool(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_mbuf_pool(pinned) failed (2)\n",
+			  __func__);
+
+	/* test that the data pointer on a packet mbuf is set properly */
+	if (test_pktmbuf_pool_ptr(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_pktmbuf_pool_ptr(pinned) failed\n",
+			  __func__);
+
+	/* test data manipulation in mbuf with non-ascii data */
+	if (test_pktmbuf_with_non_ascii_data(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_pktmbuf_with_non_ascii_data(pinned)"
+			  " failed\n", __func__);
+
+	/* test free pktmbuf segment one by one */
+	if (test_pktmbuf_free_segment(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_pktmbuf_free_segment(pinned) failed\n",
+			  __func__);
+
+	/* clones come from std_pool, originals from the pinned pool */
+	if (testclone_testupdate_testdetach(pinned_pool, std_pool) < 0)
+		GOTO_FAIL("%s: testclone_and_testupdate(pinned) failed\n",
+			  __func__);
+
+	if (test_pktmbuf_copy(pinned_pool, std_pool) < 0)
+		GOTO_FAIL("%s: test_pktmbuf_copy(pinned) failed\n",
+			  __func__);
+
+	if (test_failing_mbuf_sanity_check(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_failing_mbuf_sanity_check(pinned)"
+			  " failed\n", __func__);
+
+	if (test_mbuf_linearize_check(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_mbuf_linearize_check(pinned) failed\n",
+			  __func__);
+
+	/* test for allocating a bulk of mbufs with various sizes */
+	if (test_pktmbuf_alloc_bulk(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_rte_pktmbuf_alloc_bulk(pinned) failed\n",
+			  __func__);
+
+	/* test for allocating a bulk of mbufs with various sizes */
+	if (test_neg_pktmbuf_alloc_bulk(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_neg_rte_pktmbuf_alloc_bulk(pinned)"
+			  " failed\n", __func__);
+
+	/* test to read mbuf packet */
+	if (test_pktmbuf_read(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_rte_pktmbuf_read(pinned) failed\n",
+			  __func__);
+
+	/* test to read mbuf packet from offset */
+	if (test_pktmbuf_read_from_offset(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_offset(pinned)"
+			  " failed\n", __func__);
+
+	/* test to read data from chain of mbufs with data segments */
+	if (test_pktmbuf_read_from_chain(pinned_pool) < 0)
+		GOTO_FAIL("%s: test_rte_pktmbuf_read_from_chain(pinned)"
+			  " failed\n", __func__);
+
+	rte_mempool_free(pinned_pool);
+	rte_memzone_free(mz);
+	return 0;
+
+fail:
+	rte_mempool_free(pinned_pool);
+	rte_memzone_free(mz);
+	return -1;
+}
+
static int
test_mbuf_dyn(struct rte_mempool *pktmbuf_pool)
{
.align = 3,
.flags = 0,
};
+ const struct rte_mbuf_dynfield dynfield_fail_flag = {
+ .name = "test-dynfield",
+ .size = sizeof(uint8_t),
+ .align = __alignof__(uint8_t),
+ .flags = 1,
+ };
+ const struct rte_mbuf_dynflag dynflag_fail_flag = {
+ .name = "test-dynflag",
+ .flags = 1,
+ };
const struct rte_mbuf_dynflag dynflag = {
.name = "test-dynflag",
.flags = 0,
offset3 = rte_mbuf_dynfield_register_offset(&dynfield3,
offsetof(struct rte_mbuf, dynfield1[1]));
- if (offset3 != offsetof(struct rte_mbuf, dynfield1[1]))
- GOTO_FAIL("failed to register dynamic field 3, offset=%d: %s",
- offset3, strerror(errno));
+ if (offset3 != offsetof(struct rte_mbuf, dynfield1[1])) {
+ if (rte_errno == EBUSY)
+ printf("mbuf test error skipped: dynfield is busy\n");
+ else
+ GOTO_FAIL("failed to register dynamic field 3, offset="
+ "%d: %s", offset3, strerror(errno));
+ }
printf("dynfield: offset=%d, offset2=%d, offset3=%d\n",
offset, offset2, offset3);
if (ret != -1)
GOTO_FAIL("dynamic field creation should fail (not avail)");
+ ret = rte_mbuf_dynfield_register(&dynfield_fail_flag);
+ if (ret != -1)
+ GOTO_FAIL("dynamic field creation should fail (invalid flag)");
+
+ ret = rte_mbuf_dynflag_register(&dynflag_fail_flag);
+ if (ret != -1)
+ GOTO_FAIL("dynamic flag creation should fail (invalid flag)");
+
flag = rte_mbuf_dynflag_register(&dynflag);
if (flag == -1)
GOTO_FAIL("failed to register dynamic flag, flag=%d: %s",
flag2, strerror(errno));
flag3 = rte_mbuf_dynflag_register_bitnum(&dynflag3,
- rte_bsf64(PKT_LAST_FREE));
- if (flag3 != rte_bsf64(PKT_LAST_FREE))
- GOTO_FAIL("failed to register dynamic flag 3, flag2=%d: %s",
+ rte_bsf64(RTE_MBUF_F_LAST_FREE));
+ if (flag3 != rte_bsf64(RTE_MBUF_F_LAST_FREE))
+ GOTO_FAIL("failed to register dynamic flag 3, flag3=%d: %s",
flag3, strerror(errno));
printf("dynflag: flag=%d, flag2=%d, flag3=%d\n", flag, flag2, flag3);
return -1;
}
+/* check that m->nb_segs and m->next are reset on mbuf free */
+static int
+test_nb_segs_and_next_reset(void)
+{
+	struct rte_mbuf *m0 = NULL, *m1 = NULL, *m2 = NULL;
+	struct rte_mempool *pool = NULL;
+
+	pool = rte_pktmbuf_pool_create("test_mbuf_reset",
+			3, 0, 0, MBUF_DATA_SIZE, SOCKET_ID_ANY);
+	if (pool == NULL)
+		GOTO_FAIL("Failed to create mbuf pool");
+
+	/* alloc mbufs */
+	m0 = rte_pktmbuf_alloc(pool);
+	m1 = rte_pktmbuf_alloc(pool);
+	m2 = rte_pktmbuf_alloc(pool);
+	if (m0 == NULL || m1 == NULL || m2 == NULL)
+		GOTO_FAIL("Failed to allocate mbuf");
+
+	/* append data in all of them */
+	if (rte_pktmbuf_append(m0, 500) == NULL ||
+			rte_pktmbuf_append(m1, 500) == NULL ||
+			rte_pktmbuf_append(m2, 500) == NULL)
+		GOTO_FAIL("Failed to append data in mbuf");
+
+	/* chain them in one mbuf m0 */
+	rte_pktmbuf_chain(m1, m2);
+	rte_pktmbuf_chain(m0, m1);
+	if (m0->nb_segs != 3 || m0->next != m1 || m1->next != m2 ||
+			m2->next != NULL) {
+		/* m1 and m2 are now owned by the m0 chain */
+		m1 = m2 = NULL;
+		GOTO_FAIL("Failed to chain mbufs");
+	}
+
+	/* split m0 chain in two, between m1 and m2 */
+	m0->nb_segs = 2;
+	m1->next = NULL;
+	m2->nb_segs = 1;
+
+	/* free the 2 mbuf chains m0 and m2 */
+	rte_pktmbuf_free(m0);
+	rte_pktmbuf_free(m2);
+
+	/* realloc the 3 mbufs */
+	m0 = rte_mbuf_raw_alloc(pool);
+	m1 = rte_mbuf_raw_alloc(pool);
+	m2 = rte_mbuf_raw_alloc(pool);
+	if (m0 == NULL || m1 == NULL || m2 == NULL)
+		GOTO_FAIL("Failed to reallocate mbuf");
+
+	/* ensure that m->next and m->nb_segs are reset allocated mbufs */
+	if (m0->nb_segs != 1 || m0->next != NULL ||
+			m1->nb_segs != 1 || m1->next != NULL ||
+			m2->nb_segs != 1 || m2->next != NULL)
+		GOTO_FAIL("nb_segs or next was not reset properly");
+
+	/* return the raw-allocated mbufs to the pool and release the
+	 * pool itself: previously both leaked on the success path (the
+	 * pool was only freed under the fail: label).
+	 */
+	rte_mbuf_raw_free(m0);
+	rte_mbuf_raw_free(m1);
+	rte_mbuf_raw_free(m2);
+	rte_mempool_free(pool);
+	return 0;
+
+fail:
+	rte_mempool_free(pool);
+	return -1;
+}
+
static int
test_mbuf(void)
{
goto err;
}
- if (testclone_testupdate_testdetach(pktmbuf_pool) < 0) {
+ if (testclone_testupdate_testdetach(pktmbuf_pool, pktmbuf_pool) < 0) {
printf("testclone_and_testupdate() failed \n");
goto err;
}
- if (test_pktmbuf_copy(pktmbuf_pool) < 0) {
+ if (test_pktmbuf_copy(pktmbuf_pool, pktmbuf_pool) < 0) {
printf("test_pktmbuf_copy() failed\n");
goto err;
}
goto err;
}
+ /* test the mbuf pool with pinned external data buffers */
+ if (test_pktmbuf_ext_pinned_buffer(pktmbuf_pool) < 0) {
+ printf("test_pktmbuf_ext_pinned_buffer() failed\n");
+ goto err;
+ }
+
+ /* test reset of m->nb_segs and m->next on mbuf free */
+ if (test_nb_segs_and_next_reset() < 0) {
+ printf("test_nb_segs_and_next_reset() failed\n");
+ goto err;
+ }
+
ret = 0;
err:
rte_mempool_free(pktmbuf_pool);