+static inline uint32_t
+ioat_enqueue(struct rte_mbuf *pkts[], struct rte_mbuf *pkts_copy[],
+ uint32_t num, uint16_t dev_id)
+{
+ /* Submit up to @num packet-copy descriptors to the IOAT device,
+ * then trigger the hardware so the submitted descriptors start
+ * executing. Returns the number of packets actually enqueued.
+ */
+ uint32_t enq = ioat_enqueue_packets(pkts, pkts_copy, num, dev_id);
+
+ if (enq != 0)
+ rte_ioat_perform_ops(dev_id);
+
+ return enq;
+}
+
+static inline uint32_t
+ioat_dequeue(struct rte_mbuf *src[], struct rte_mbuf *dst[], uint32_t num,
+ uint16_t dev_id)
+{
+ int32_t rc;
+ /* Dequeue the mbufs from IOAT device. Since all memory
+ * is DPDK pinned memory and therefore all addresses should
+ * be valid, we don't check for copy errors
+ */
+ rc = rte_ioat_completed_ops(dev_id, num, NULL, NULL,
+ (void *)src, (void *)dst);
+ if (rc < 0) {
+ /* Fix: log message typo ("failedi" -> "failed"). On error,
+ * report it and return 0 completed operations to the caller.
+ */
+ RTE_LOG(CRIT, IOAT,
+ "rte_ioat_completed_ops(%hu) failed, error: %d\n",
+ dev_id, rte_errno);
+ rc = 0;
+ }
+ /* Number of completed copy operations placed into src[]/dst[]. */
+ return rc;
+}
+