+/*
+ * Check that the mempool event callback list can be modified safely
+ * from inside a callback invocation: sdata[0] unregisters itself,
+ * sdata[1] registers an additional callback while events are being
+ * delivered.
+ */
+static int
+test_mempool_events_safety(void)
+{
+/* Redirect assertion failures to the local cleanup label below. */
+#pragma push_macro("RTE_TEST_TRACE_FAILURE")
+#undef RTE_TEST_TRACE_FAILURE
+#define RTE_TEST_TRACE_FAILURE(...) do { \
+ ret = TEST_FAILED; \
+ goto exit; \
+ } while (0)
+
+ struct test_mempool_events_data data;
+ struct test_mempool_events_safety_data sdata[2];
+ struct rte_mempool *mp;
+ size_t i;
+ int ret;
+
+ /* removes itself */
+ sdata[0].api_func = rte_mempool_event_callback_unregister;
+ sdata[0].cb_func = test_mempool_events_safety_cb;
+ sdata[0].cb_user_data = &sdata[0];
+ sdata[0].ret = -1;
+ rte_mempool_event_callback_register(test_mempool_events_safety_cb,
+ &sdata[0]);
+ /* inserts a callback after itself */
+ sdata[1].api_func = rte_mempool_event_callback_register;
+ sdata[1].cb_func = test_mempool_events_cb;
+ sdata[1].cb_user_data = &data;
+ sdata[1].ret = -1;
+ rte_mempool_event_callback_register(test_mempool_events_safety_cb,
+ &sdata[1]);
+
+ mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ SOCKET_ID_ANY, 0);
+ RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
+ rte_strerror(rte_errno));
+ memset(&data, 0, sizeof(data));
+ /* Populating delivers an event to the registered callbacks. */
+ ret = rte_mempool_populate_default(mp);
+ RTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, "Failed to populate mempool: %s",
+ rte_strerror(-ret));
+
+ RTE_TEST_ASSERT_EQUAL(sdata[0].ret, 0, "Callback failed to unregister itself: %s",
+ rte_strerror(rte_errno));
+ RTE_TEST_ASSERT_EQUAL(sdata[1].ret, 0, "Failed to insert a new callback: %s",
+ rte_strerror(rte_errno));
+ /* A callback inserted during delivery must not run for this event. */
+ RTE_TEST_ASSERT_EQUAL(data.invoked, false,
+ "Inserted callback is invoked on mempool population");
+
+ memset(&data, 0, sizeof(data));
+ sdata[0].invoked = false;
+ /* Freeing the mempool delivers another event. */
+ rte_mempool_free(mp);
+ mp = NULL;
+ RTE_TEST_ASSERT_EQUAL(sdata[0].invoked, false,
+ "Callback that unregistered itself was called");
+ /* sdata[1] attempted to register test_mempool_events_cb again. */
+ RTE_TEST_ASSERT_EQUAL(sdata[1].ret, -EEXIST,
+ "New callback inserted twice");
+ RTE_TEST_ASSERT_EQUAL(data.invoked, true,
+ "Inserted callback is not invoked on mempool destruction");
+
+ rte_mempool_event_callback_unregister(test_mempool_events_cb, &data);
+ for (i = 0; i < RTE_DIM(sdata); i++)
+ rte_mempool_event_callback_unregister
+ (test_mempool_events_safety_cb, &sdata[i]);
+ ret = TEST_SUCCESS;
+
+exit:
+ /* cleanup, don't care which callbacks are already removed */
+ rte_mempool_event_callback_unregister(test_mempool_events_cb, &data);
+ for (i = 0; i < RTE_DIM(sdata); i++)
+ rte_mempool_event_callback_unregister
+ (test_mempool_events_safety_cb, &sdata[i]);
+ /* in case of failure before the planned destruction */
+ rte_mempool_free(mp);
+ return ret;
+
+#pragma pop_macro("RTE_TEST_TRACE_FAILURE")
+}
+
+/*
+ * For the NON_IO flag tests below, make failed test assertions jump to
+ * the function-local "exit" label so resources are always released;
+ * the original macro is restored with pop_macro after the tests.
+ */
+#pragma push_macro("RTE_TEST_TRACE_FAILURE")
+#undef RTE_TEST_TRACE_FAILURE
+#define RTE_TEST_TRACE_FAILURE(...) do { \
+ ret = TEST_FAILED; \
+ goto exit; \
+ } while (0)
+
+/*
+ * Check that RTE_MEMPOOL_F_NON_IO is set when the mempool is created
+ * with RTE_MEMPOOL_F_NO_IOVA_CONTIG, even though the memory used for
+ * population has a valid IOVA.
+ */
+static int
+test_mempool_flag_non_io_set_when_no_iova_contig_set(void)
+{
+ const struct rte_memzone *mz = NULL;
+ void *virt;
+ rte_iova_t iova;
+ size_t size = MEMPOOL_ELT_SIZE * 16;
+ struct rte_mempool *mp = NULL;
+ int ret;
+
+ mz = rte_memzone_reserve("test_mempool", size, SOCKET_ID_ANY, 0);
+ RTE_TEST_ASSERT_NOT_NULL(mz, "Cannot allocate memory");
+ virt = mz->addr;
+ iova = mz->iova;
+ mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG);
+ RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
+ rte_strerror(rte_errno));
+ /*
+ * Check the result instead of silently ignoring a failure:
+ * running with the default ops would make the test misleading.
+ */
+ ret = rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL);
+ RTE_TEST_ASSERT_EQUAL(ret, 0, "Cannot set mempool ops: %s",
+ rte_strerror(-ret));
+
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
+ "NON_IO flag is not set on an empty mempool");
+
+ /*
+ * Always use valid IOVA so that populate() has no other reason
+ * to infer that the mempool cannot be used for IO.
+ */
+ ret = rte_mempool_populate_iova(mp, virt, iova, size, NULL, NULL);
+ RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
+ rte_strerror(-ret));
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
+ "NON_IO flag is not set when NO_IOVA_CONTIG is set");
+ ret = TEST_SUCCESS;
+exit:
+ rte_mempool_free(mp);
+ rte_memzone_free(mz);
+ return ret;
+}
+
+/*
+ * Check that RTE_MEMPOOL_F_NON_IO is dropped as soon as a chunk with a
+ * valid IOVA is added, and stays dropped for later RTE_BAD_IOVA chunks.
+ */
+static int
+test_mempool_flag_non_io_unset_when_populated_with_valid_iova(void)
+{
+ const struct rte_memzone *zone = NULL;
+ struct rte_mempool *mp = NULL;
+ size_t mem_size = MEMPOOL_ELT_SIZE * MEMPOOL_SIZE;
+ size_t chunk_size = mem_size / 3;
+ void *base;
+ rte_iova_t good_iova;
+ int ret;
+
+ /*
+ * Since objects from the pool are never used in the test,
+ * we don't care for contiguous IOVA, on the other hand,
+ * requiring it could cause spurious test failures.
+ */
+ zone = rte_memzone_reserve("test_mempool", mem_size, SOCKET_ID_ANY, 0);
+ RTE_TEST_ASSERT_NOT_NULL(zone, "Cannot allocate memory");
+ base = zone->addr;
+ good_iova = zone->iova;
+ mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ SOCKET_ID_ANY, 0);
+ RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
+ rte_strerror(rte_errno));
+
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
+ "NON_IO flag is not set on an empty mempool");
+
+ /* First chunk: unknown IOVA, the flag must survive. */
+ ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(base, chunk_size),
+ RTE_BAD_IOVA, chunk_size, NULL, NULL);
+ RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
+ rte_strerror(-ret));
+ RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
+ "NON_IO flag is not set when mempool is populated with only RTE_BAD_IOVA");
+
+ /* Second chunk: valid IOVA, the flag must be cleared. */
+ ret = rte_mempool_populate_iova(mp, base, good_iova, chunk_size,
+ NULL, NULL);
+ RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
+ rte_strerror(-ret));
+ RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
+ "NON_IO flag is not unset when mempool is populated with valid IOVA");
+
+ /* Third chunk: unknown IOVA again, the flag must stay cleared. */
+ ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(base, chunk_size * 2),
+ RTE_BAD_IOVA, chunk_size, NULL, NULL);
+ RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
+ rte_strerror(-ret));
+ RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
+ "NON_IO flag is set even when some objects have valid IOVA");
+ ret = TEST_SUCCESS;
+
+exit:
+ rte_mempool_free(mp);
+ rte_memzone_free(zone);
+ return ret;
+}
+
+#pragma pop_macro("RTE_TEST_TRACE_FAILURE")
+
+static int