+ /*
+ * infinite_rx mode: pre-load every packet of the pcap file into a
+ * ring once at queue setup, so the RX path can replay them endlessly
+ * without touching the file again.
+ */
+ if (internals->infinite_rx) {
+ struct pmd_process_private *pp;
+ char ring_name[NAME_MAX];
+ /* Process-wide counter used only to make ring names unique. */
+ static uint32_t ring_number;
+ uint64_t pcap_pkt_count = 0;
+ struct rte_mbuf *bufs[1];
+ pcap_t **pcap;
+
+ /* Per-process private data holds the pcap handle for this queue. */
+ pp = rte_eth_devices[pcap_q->port_id].process_private;
+ pcap = &pp->rx_pcap[pcap_q->queue_id];
+
+ if (unlikely(*pcap == NULL))
+ return -ENOENT;
+
+ /* Count packets up front to size the replay ring. */
+ pcap_pkt_count = count_packets_in_pcap(pcap, pcap_q);
+
+ snprintf(ring_name, sizeof(ring_name), "PCAP_RING%" PRIu32,
+ ring_number);
+
+ /*
+ * Ring size must be a power of two and an rte_ring holds at most
+ * size - 1 entries, hence the +1 before rounding up. Single
+ * producer / single consumer is sufficient: only this setup code
+ * enqueues and only this queue's RX path dequeues.
+ */
+ pcap_q->pkts = rte_ring_create(ring_name,
+ rte_align64pow2(pcap_pkt_count + 1), 0,
+ RING_F_SP_ENQ | RING_F_SC_DEQ);
+ ring_number++;
+ if (!pcap_q->pkts)
+ return -ENOENT;
+
+ /* Fill ring with packets from PCAP file one by one. */
+ while (eth_pcap_rx(pcap_q, bufs, 1)) {
+ /* Check for multiseg mbufs. */
+ if (bufs[0]->nb_segs != 1) {
+ infinite_rx_ring_free(pcap_q->pkts);
+ PMD_LOG(ERR,
+ "Multiseg mbufs are not supported in infinite_rx mode.");
+ return -EINVAL;
+ }
+
+ /* Ring was sized to hold every counted packet, so a bulk
+ * enqueue of one cannot fail here; return value ignored. */
+ rte_ring_enqueue_bulk(pcap_q->pkts,
+ (void * const *)bufs, 1, NULL);
+ }
+
+ /*
+ * Fewer packets in the ring than in the file means eth_pcap_rx
+ * stopped early — presumably mbuf pool exhaustion, per the error
+ * message below (eth_pcap_rx body not visible here; confirm).
+ */
+ if (rte_ring_count(pcap_q->pkts) < pcap_pkt_count) {
+ infinite_rx_ring_free(pcap_q->pkts);
+ PMD_LOG(ERR,
+ "Not enough mbufs to accommodate packets in pcap file. "
+ "At least %" PRIu64 " mbufs per queue is required.",
+ pcap_pkt_count);
+ return -EINVAL;
+ }
+
+ /*
+ * Reset the stats for this queue since eth_pcap_rx calls above
+ * didn't result in the application receiving packets.
+ */
+ pcap_q->rx_stat.pkts = 0;
+ pcap_q->rx_stat.bytes = 0;
+ }
+