+ * This function gets available ring number for zero copy rx.
+ * Only one thread will call this function for a particular virtio device,
+ * so, it is designed as non-thread-safe function.
+ */
+static inline uint32_t __attribute__((always_inline))
+get_available_ring_num_zcp(struct virtio_net *dev)
+{
+ struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
+ /* Volatile read: the guest updates avail->idx concurrently. */
+ uint16_t guest_avail = *((volatile uint16_t *)&vq->avail->idx);
+
+ /* 16-bit unsigned subtraction is wrap-around safe. */
+ return (uint32_t)(guest_avail - vq->last_used_idx_res);
+}
+
+/*
+ * This function gets available ring index for zero copy rx,
+ * it will retry 'burst_rx_retry_num' times until it gets enough ring index.
+ * Only one thread will call this function for a particular virtio device,
+ * so, it is designed as non-thread-safe function.
+ */
+static inline uint32_t __attribute__((always_inline))
+get_available_ring_index_zcp(struct virtio_net *dev,
+ uint16_t *res_base_idx, uint32_t count)
+{
+ struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_RXQ];
+ uint16_t avail_idx;
+ uint32_t retry = 0;
+ uint16_t free_entries;
+
+ /* Reservation begins where the previous one ended. */
+ *res_base_idx = vq->last_used_idx_res;
+ /* Volatile read: the guest updates avail->idx concurrently. */
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ /* 16-bit unsigned subtraction is wrap-around safe. */
+ free_entries = (avail_idx - *res_base_idx);
+
+ LOG_DEBUG(DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
+ "avail idx: %d, "
+ "res base idx:%d, free entries:%d\n",
+ dev->device_fh, avail_idx, *res_base_idx,
+ free_entries);
+
+ /*
+ * If retry is enabled and the queue is full then we wait
+ * and retry to avoid packet loss, re-reading avail->idx each
+ * pass to pick up entries the guest publishes meanwhile.
+ */
+ if (enable_retry && unlikely(count > free_entries)) {
+ for (retry = 0; retry < burst_rx_retry_num; retry++) {
+ rte_delay_us(burst_rx_delay_time);
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+ free_entries = (avail_idx - *res_base_idx);
+ if (count <= free_entries)
+ break;
+ }
+ }
+
+ /* Clamp the request to what is actually available. */
+ /*check that we have enough buffers*/
+ if (unlikely(count > free_entries))
+ count = free_entries;
+
+ if (unlikely(count == 0)) {
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") Fail in get_available_ring_index_zcp: "
+ "avail idx: %d, res base idx:%d, free entries:%d\n",
+ dev->device_fh, avail_idx,
+ *res_base_idx, free_entries);
+ return 0;
+ }
+
+ /* Commit the reservation; no atomics needed because only one
+ * thread calls this function per device (see header comment). */
+ vq->last_used_idx_res = *res_base_idx + count;
+
+ return count;
+}
+
+/*
+ * This function puts a descriptor back onto the used list.
+ */
+static inline void __attribute__((always_inline))
+put_desc_to_used_list_zcp(struct vhost_virtqueue *vq, uint16_t desc_idx)
+{
+ uint16_t cur = vq->last_used_idx;
+ uint32_t slot = cur & (vq->size - 1);
+
+ /* Return the descriptor to the guest with zero length (no data). */
+ vq->used->ring[slot].id = (uint32_t)desc_idx;
+ vq->used->ring[slot].len = 0;
+
+ /* The ring entry must be visible before the index is published. */
+ rte_compiler_barrier();
+ *(volatile uint16_t *)&vq->used->idx += 1;
+ vq->last_used_idx += 1;
+
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+}
+
+/*
+ * This function gets an available descriptor from the virtio vring and an
+ * un-attached mbuf from vpool->ring, and then attaches them together. It
+ * needs to adjust the offset for buff_addr and phys_addr according to the
+ * PMD implementation, otherwise the frame data may be put at the wrong
+ * location in the mbuf.
+ */
+static inline void __attribute__((always_inline))
+attach_rxmbuf_zcp(struct virtio_net *dev)
+{
+ uint16_t res_base_idx, desc_idx;
+ uint64_t buff_addr, phys_addr;
+ struct vhost_virtqueue *vq;
+ struct vring_desc *desc;
+ struct rte_mbuf *mbuf = NULL;
+ struct vpool *vpool;
+ hpa_type addr_type;
+
+ /* One vpool per VMDq RX queue holds the un-attached mbufs. */
+ vpool = &vpool_array[dev->vmdq_rx_q];
+ vq = dev->virtqueue[VIRTIO_RXQ];
+
+ /* Reserve descriptors until one with a usable host physical
+ * address is found; unusable ones go straight back to the
+ * used ring and the loop retries. */
+ do {
+ if (unlikely(get_available_ring_index_zcp(dev, &res_base_idx,
+ 1) != 1))
+ return;
+ desc_idx = vq->avail->ring[(res_base_idx) & (vq->size - 1)];
+
+ desc = &vq->desc[desc_idx];
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ /* Chained: header and data are in separate buffers;
+ * translate the data buffer (second descriptor). */
+ desc = &vq->desc[desc->next];
+ buff_addr = gpa_to_vva(dev, desc->addr);
+ phys_addr = gpa_to_hpa(dev, desc->addr, desc->len,
+ &addr_type);
+ } else {
+ /* Single buffer: skip past the virtio net header. */
+ buff_addr = gpa_to_vva(dev,
+ desc->addr + vq->vhost_hlen);
+ phys_addr = gpa_to_hpa(dev,
+ desc->addr + vq->vhost_hlen,
+ desc->len, &addr_type);
+ }
+
+ if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
+ RTE_LOG(ERR, DATA, "(%"PRIu64") Invalid frame buffer"
+ " address found when attaching RX frame buffer"
+ " address!\n", dev->device_fh);
+ put_desc_to_used_list_zcp(vq, desc_idx);
+ continue;
+ }
+
+ /*
+ * Check if the frame buffer address from guest crosses
+ * sub-region or not.
+ */
+ if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
+ RTE_LOG(ERR, DATA,
+ "(%"PRIu64") Frame buffer address cross "
+ "sub-regioin found when attaching RX frame "
+ "buffer address!\n",
+ dev->device_fh);
+ put_desc_to_used_list_zcp(vq, desc_idx);
+ continue;
+ }
+ } while (unlikely(phys_addr == 0));
+
+ /* Take one un-attached mbuf from the vpool ring. */
+ rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
+ if (unlikely(mbuf == NULL)) {
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in attach_rxmbuf_zcp: "
+ "ring_sc_dequeue fail.\n",
+ dev->device_fh);
+ put_desc_to_used_list_zcp(vq, desc_idx);
+ return;
+ }
+
+ /* Guest buffer must be at least as large as the PMD's room size. */
+ if (unlikely(vpool->buf_size > desc->len)) {
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
+ "length(%d) of descriptor idx: %d less than room "
+ "size required: %d\n",
+ dev->device_fh, desc->len, desc_idx, vpool->buf_size);
+ put_desc_to_used_list_zcp(vq, desc_idx);
+ rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
+ return;
+ }
+
+ /* Point the mbuf at the guest buffer; buf_addr/buf_physaddr are
+ * backed off by RTE_PKTMBUF_HEADROOM so the PMD writes frame data
+ * at the expected offset. */
+ mbuf->buf_addr = (void *)(uintptr_t)(buff_addr - RTE_PKTMBUF_HEADROOM);
+ mbuf->pkt.data = (void *)(uintptr_t)(buff_addr);
+ mbuf->buf_physaddr = phys_addr - RTE_PKTMBUF_HEADROOM;
+ mbuf->pkt.data_len = desc->len;
+ /* Stash the descriptor index in the headroom so it can be
+ * returned to the used ring after the frame is received. */
+ MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
+ "descriptor idx:%d\n",
+ dev->device_fh, res_base_idx, desc_idx);
+
+ /* Return the now-attached mbuf to the mempool so the PMD RX path
+ * can allocate it with the guest buffer already attached —
+ * NOTE(review): presumed intent of raw_free here; confirm. */
+ __rte_mbuf_raw_free(mbuf);
+
+ return;
+}
+
+/*
+ * Detach an attached packet mbuf -
+ * - restore original mbuf address and length values.
+ * - reset pktmbuf data and data_len to their default values.
+ * All other fields of the given packet mbuf will be left intact.
+ *
+ * @param m
+ * The attached packet mbuf.
+ */
+static inline void pktmbuf_detach_zcp(struct rte_mbuf *m)
+{
+ const struct rte_mempool *mp = m->pool;
+ void *default_buf = RTE_MBUF_TO_BADDR(m);
+ uint32_t room;
+ uint32_t default_len = mp->elt_size - sizeof(*m);
+
+ /* Restore buffer fields to the mbuf's own embedded buffer. */
+ m->buf_physaddr = rte_mempool_virt2phy(mp, m) + sizeof(*m);
+ m->buf_addr = default_buf;
+ m->buf_len = (uint16_t)default_len;
+
+ /* Reserve the standard headroom, clipped to the buffer length. */
+ if (RTE_PKTMBUF_HEADROOM <= m->buf_len)
+ room = RTE_PKTMBUF_HEADROOM;
+ else
+ room = m->buf_len;
+ m->pkt.data = (char *) m->buf_addr + room;
+
+ m->pkt.data_len = 0;
+}
+
+/*
+ * This function is called after packets have been transmitted. It fetches
+ * mbufs from vpool->pool, detaches them and puts them into vpool->ring. It
+ * also updates the
+ * used index and kick the guest if necessary.
+ */
+static inline uint32_t __attribute__((always_inline))
+txmbuf_clean_zcp(struct virtio_net *dev, struct vpool *vpool)
+{
+ struct rte_mbuf *mbuf;
+ struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
+ uint32_t used_idx = vq->last_used_idx & (vq->size - 1);
+ uint32_t index = 0;
+ /* Number of mbufs actually reclaimed; may be less than
+ * mbuf_count if an allocation fails below. */
+ uint32_t cleaned = 0;
+ uint32_t mbuf_count = rte_mempool_count(vpool->pool);
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
+ "clean is: %d\n",
+ dev->device_fh, mbuf_count);
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
+ "clean is : %d\n",
+ dev->device_fh, rte_ring_count(vpool->ring));
+
+ for (index = 0; index < mbuf_count; index++) {
+ mbuf = __rte_mbuf_raw_alloc(vpool->pool);
+ /* The pool can be drained concurrently by the PMD;
+ * stop rather than dereference NULL (mbuf_destroy_zcp
+ * applies the same check). */
+ if (unlikely(mbuf == NULL))
+ break;
+ if (likely(RTE_MBUF_INDIRECT(mbuf)))
+ pktmbuf_detach_zcp(mbuf);
+ rte_ring_sp_enqueue(vpool->ring, mbuf);
+
+ /* Update used index buffer information. */
+ vq->used->ring[used_idx].id = MBUF_HEADROOM_UINT32(mbuf);
+ vq->used->ring[used_idx].len = 0;
+
+ used_idx = (used_idx + 1) & (vq->size - 1);
+ cleaned++;
+ }
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
+ "clean is: %d\n",
+ dev->device_fh, rte_mempool_count(vpool->pool));
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
+ "clean is : %d\n",
+ dev->device_fh, rte_ring_count(vpool->ring));
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: before updated "
+ "vq->last_used_idx:%d\n",
+ dev->device_fh, vq->last_used_idx);
+
+ /* Advance by the number actually written to the used ring. */
+ vq->last_used_idx += cleaned;
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in txmbuf_clean_zcp: after updated "
+ "vq->last_used_idx:%d\n",
+ dev->device_fh, vq->last_used_idx);
+
+ /* Entries must be visible before the index is published. */
+ rte_compiler_barrier();
+
+ *(volatile uint16_t *)&vq->used->idx += cleaned;
+
+ /* Kick guest if required. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+
+ return 0;
+}
+
+/*
+ * This function is called when a virtio device is destroyed.
+ * It fetches mbufs from vpool->pool, detaches them, and puts them into
+ * vpool->ring.
+ */
+static void mbuf_destroy_zcp(struct vpool *vpool)
+{
+ struct rte_mbuf *mbuf = NULL;
+ uint32_t i;
+ uint32_t total = rte_mempool_count(vpool->pool);
+
+ LOG_DEBUG(CONFIG,
+ "in mbuf_destroy_zcp: mbuf count in mempool before "
+ "mbuf_destroy_zcp is: %d\n",
+ total);
+ LOG_DEBUG(CONFIG,
+ "in mbuf_destroy_zcp: mbuf count in ring before "
+ "mbuf_destroy_zcp is : %d\n",
+ rte_ring_count(vpool->ring));
+
+ /* Drain the mempool: detach each mbuf from its guest buffer and
+ * park it on the vpool ring. */
+ for (i = 0; i < total; i++) {
+ mbuf = __rte_mbuf_raw_alloc(vpool->pool);
+ if (unlikely(mbuf == NULL))
+ continue;
+ if (likely(RTE_MBUF_INDIRECT(mbuf)))
+ pktmbuf_detach_zcp(mbuf);
+ rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
+ }
+
+ LOG_DEBUG(CONFIG,
+ "in mbuf_destroy_zcp: mbuf count in mempool after "
+ "mbuf_destroy_zcp is: %d\n",
+ rte_mempool_count(vpool->pool));
+ LOG_DEBUG(CONFIG,
+ "in mbuf_destroy_zcp: mbuf count in ring after "
+ "mbuf_destroy_zcp is : %d\n",
+ rte_ring_count(vpool->ring));
+}
+
+/*
+ * This function update the use flag and counter.
+ */
+static inline uint32_t __attribute__((always_inline))
+virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
+ uint32_t count)
+{
+ struct vhost_virtqueue *vq;
+ struct vring_desc *desc;
+ struct rte_mbuf *buff;
+ /* The virtio_hdr is initialised to 0. */
+ struct virtio_net_hdr_mrg_rxbuf virtio_hdr
+ = {{0, 0, 0, 0, 0, 0}, 0};
+ uint64_t buff_hdr_addr = 0;
+ uint32_t head[MAX_PKT_BURST], packet_len = 0;
+ uint32_t head_idx, packet_success = 0;
+ uint16_t res_cur_idx;
+
+ LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+
+ if (count == 0)
+ return 0;
+
+ vq = dev->virtqueue[VIRTIO_RXQ];
+ count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
+
+ res_cur_idx = vq->last_used_idx;
+ LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ dev->device_fh, res_cur_idx, res_cur_idx + count);
+
+ /* Retrieve all of the head indexes first to avoid caching issues.
+ * The descriptor index was stashed in each mbuf's headroom by
+ * attach_rxmbuf_zcp(). */
+ for (head_idx = 0; head_idx < count; head_idx++)
+ head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);
+
+ /*Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success]]);
+
+ while (packet_success != count) {
+ /* Get descriptor from available ring */
+ desc = &vq->desc[head[packet_success]];
+
+ buff = pkts[packet_success];
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in dev_rx_zcp: update the used idx for "
+ "pkt[%d] descriptor idx: %d\n",
+ dev->device_fh, packet_success,
+ MBUF_HEADROOM_UINT32(buff));
+
+ PRINT_PACKET(dev,
+ (uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)
+ + RTE_PKTMBUF_HEADROOM),
+ rte_pktmbuf_data_len(buff), 0);
+
+ /* Buffer address translation for virtio header. */
+ buff_hdr_addr = gpa_to_vva(dev, desc->addr);
+ packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
+
+ /*
+ * If the descriptors are chained the header and data are
+ * placed in separate buffers.
+ */
+ if (desc->flags & VRING_DESC_F_NEXT) {
+ desc->len = vq->vhost_hlen;
+ desc = &vq->desc[desc->next];
+ desc->len = rte_pktmbuf_data_len(buff);
+ } else {
+ desc->len = packet_len;
+ }
+
+ /* Update used ring with desc information */
+ vq->used->ring[res_cur_idx & (vq->size - 1)].id
+ = head[packet_success];
+ vq->used->ring[res_cur_idx & (vq->size - 1)].len
+ = packet_len;
+ res_cur_idx++;
+ packet_success++;
+
+ /* A header is required per buffer. */
+ rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
+ (const void *)&virtio_hdr, vq->vhost_hlen);
+
+ PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
+
+ if (likely(packet_success < count)) {
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success]]);
+ }
+ }
+
+ /* Used-ring entries must be visible before the index update. */
+ rte_compiler_barrier();
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in dev_rx_zcp: before update used idx: "
+ "vq.last_used_idx: %d, vq->used->idx: %d\n",
+ dev->device_fh, vq->last_used_idx, vq->used->idx);
+
+ *(volatile uint16_t *)&vq->used->idx += count;
+ vq->last_used_idx += count;
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in dev_rx_zcp: after update used idx: "
+ "vq.last_used_idx: %d, vq->used->idx: %d\n",
+ dev->device_fh, vq->last_used_idx, vq->used->idx);
+
+ /* Kick the guest if necessary. */
+ if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
+ eventfd_write((int)vq->kickfd, 1);
+
+ return count;
+}
+
+/*
+ * This function routes the TX packet to the correct interface.
+ * This may be a local device or the physical port.
+ */
+static inline void __attribute__((always_inline))
+virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
+ uint32_t desc_idx, uint8_t need_copy)
+{
+ struct mbuf_table *tx_q;
+ struct rte_mbuf **m_table;
+ struct rte_mbuf *mbuf = NULL;
+ unsigned len, ret, offset = 0;
+ struct vpool *vpool;
+ struct virtio_net_data_ll *dev_ll = ll_root_used;
+ struct ether_hdr *pkt_hdr = (struct ether_hdr *)m->pkt.data;
+ uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
+
+ /*Add packet to the port tx queue*/
+ tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];
+ len = tx_q->len;
+
+ /* Allocate an mbuf and populate the structure.
+ * TX vpools are offset by MAX_QUEUES from the RX vpools. */
+ vpool = &vpool_array[MAX_QUEUES + (uint16_t)dev->vmdq_rx_q];
+ rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
+ if (unlikely(mbuf == NULL)) {
+ struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
+ RTE_LOG(ERR, DATA,
+ "(%"PRIu64") Failed to allocate memory for mbuf.\n",
+ dev->device_fh);
+ put_desc_to_used_list_zcp(vq, desc_idx);
+ return;
+ }
+
+ if (vm2vm_mode == VM2VM_HARDWARE) {
+ /* Avoid using a per-VM vlan tag (e.g.
+ * vlan_tags[dev->device_fh]) for an external packet;
+ * otherwise pool selection conflicts: the MAC address says
+ * "external, send to network" while the vlan tag says
+ * "vm2vm, forward to another VM". The hardware cannot
+ * resolve this ambiguity and the packet would be lost.
+ */
+ vlan_tag = external_pkt_default_vlan_tag;
+ /* Walk the device list to see if the destination MAC
+ * belongs to a local VM that is ready for RX. */
+ while (dev_ll != NULL) {
+ if (likely(dev_ll->dev->ready == DEVICE_RX) &&
+ ether_addr_cmp(&(pkt_hdr->d_addr),
+ &dev_ll->dev->mac_address)) {
+
+ /*
+ * Drop the packet if the TX packet is destined
+ * for the TX device.
+ */
+ if (unlikely(dev_ll->dev->device_fh
+ == dev->device_fh)) {
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") TX: Source and destination"
+ "MAC addresses are the same. Dropping "
+ "packet.\n",
+ dev_ll->dev->device_fh);
+ /* Record desc index so cleanup can
+ * return it to the used ring. */
+ MBUF_HEADROOM_UINT32(mbuf)
+ = (uint32_t)desc_idx;
+ __rte_mbuf_raw_free(mbuf);
+ return;
+ }
+
+ /*
+ * Packet length offset 4 bytes for HW vlan
+ * strip when L2 switch back.
+ */
+ offset = 4;
+ vlan_tag =
+ (uint16_t)
+ vlan_tags[(uint16_t)dev_ll->dev->device_fh];
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") TX: pkt to local VM device id:"
+ "(%"PRIu64") vlan tag: %d.\n",
+ dev->device_fh, dev_ll->dev->device_fh,
+ vlan_tag);
+
+ break;
+ }
+ dev_ll = dev_ll->next;
+ }
+ }
+
+ /* Populate the new mbuf from the dummy mbuf built by the caller. */
+ mbuf->pkt.nb_segs = m->pkt.nb_segs;
+ mbuf->pkt.next = m->pkt.next;
+ mbuf->pkt.data_len = m->pkt.data_len + offset;
+ mbuf->pkt.pkt_len = mbuf->pkt.data_len;
+ if (unlikely(need_copy)) {
+ /* Copy the packet contents to the mbuf (guest buffer
+ * crossed a sub-region, so zero-copy is unsafe). */
+ rte_memcpy((void *)((uint8_t *)mbuf->pkt.data),
+ (const void *) ((uint8_t *)m->pkt.data),
+ m->pkt.data_len);
+ } else {
+ /* Zero copy: share the guest buffer directly. */
+ mbuf->pkt.data = m->pkt.data;
+ mbuf->buf_physaddr = m->buf_physaddr;
+ mbuf->buf_addr = m->buf_addr;
+ }
+ /* Request HW vlan insertion on transmit. */
+ mbuf->ol_flags = PKT_TX_VLAN_PKT;
+ mbuf->pkt.vlan_macip.f.vlan_tci = vlan_tag;
+ mbuf->pkt.vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ mbuf->pkt.vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ /* Stash the desc index for txmbuf_clean_zcp() to return it. */
+ MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
+
+ tx_q->m_table[len] = mbuf;
+ len++;
+
+ LOG_DEBUG(DATA,
+ "(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
+ dev->device_fh,
+ mbuf->pkt.nb_segs,
+ (mbuf->pkt.next == NULL) ? "null" : "non-null");
+
+ if (enable_stats) {
+ dev_statistics[dev->device_fh].tx_total++;
+ dev_statistics[dev->device_fh].tx++;
+ }
+
+ /* Flush the queue once a full burst has accumulated. */
+ if (unlikely(len == MAX_PKT_BURST)) {
+ m_table = (struct rte_mbuf **)tx_q->m_table;
+ ret = rte_eth_tx_burst(ports[0],
+ (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
+
+ /*
+ * Free any buffers not handled by TX and update
+ * the port stats.
+ */
+ if (unlikely(ret < len)) {
+ do {
+ rte_pktmbuf_free(m_table[ret]);
+ } while (++ret < len);
+ }
+
+ len = 0;
+ txmbuf_clean_zcp(dev, vpool);
+ }
+
+ tx_q->len = len;
+
+ return;
+}
+
+/*
+ * This function TX all available packets in virtio TX queue for one
+ * virtio-net device. If it is first packet, it learns MAC address and
+ * setup VMDQ.
+ */
+static inline void __attribute__((always_inline))
+virtio_dev_tx_zcp(struct virtio_net *dev)
+{
+ struct rte_mbuf m;
+ struct vhost_virtqueue *vq;
+ struct vring_desc *desc;
+ uint64_t buff_addr = 0, phys_addr;
+ uint32_t head[MAX_PKT_BURST];
+ uint32_t i;
+ uint16_t free_entries, packet_success = 0;
+ uint16_t avail_idx;
+ uint8_t need_copy = 0;
+ hpa_type addr_type;
+
+ vq = dev->virtqueue[VIRTIO_TXQ];
+ /* Volatile read: the guest updates avail->idx concurrently. */
+ avail_idx = *((volatile uint16_t *)&vq->avail->idx);
+
+ /* If there are no available buffers then return. */
+ if (vq->last_used_idx_res == avail_idx)
+ return;
+
+ LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+
+ /* Prefetch available ring to retrieve head indexes. */
+ rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);
+
+ /* Get the number of free entries in the ring */
+ free_entries = (avail_idx - vq->last_used_idx_res);
+
+ /* Limit to MAX_PKT_BURST. */
+ free_entries
+ = (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;
+
+ LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n",
+ dev->device_fh, free_entries);
+
+ /* Retrieve all of the head indexes first to avoid caching issues. */
+ for (i = 0; i < free_entries; i++)
+ head[i]
+ = vq->avail->ring[(vq->last_used_idx_res + i)
+ & (vq->size - 1)];
+
+ /* Reserve the whole burst up front. */
+ vq->last_used_idx_res += free_entries;
+
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success]]);
+ rte_prefetch0(&vq->used->ring[vq->last_used_idx & (vq->size - 1)]);
+
+ while (packet_success < free_entries) {
+ desc = &vq->desc[head[packet_success]];
+
+ /* Discard first buffer as it is the virtio header */
+ desc = &vq->desc[desc->next];
+
+ /* Buffer address translation. */
+ buff_addr = gpa_to_vva(dev, desc->addr);
+ phys_addr = gpa_to_hpa(dev, desc->addr, desc->len, &addr_type);
+
+ if (likely(packet_success < (free_entries - 1)))
+ /* Prefetch descriptor index. */
+ rte_prefetch0(&vq->desc[head[packet_success + 1]]);
+
+ if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
+ RTE_LOG(ERR, DATA,
+ "(%"PRIu64") Invalid frame buffer address found"
+ "when TX packets!\n",
+ dev->device_fh);
+ packet_success++;
+ continue;
+ }
+
+ /* Prefetch buffer address. */
+ rte_prefetch0((void *)(uintptr_t)buff_addr);
+
+ /*
+ * Setup dummy mbuf. This is copied to a real mbuf if
+ * transmitted out the physical port.
+ */
+ m.pkt.data_len = desc->len;
+ m.pkt.nb_segs = 1;
+ m.pkt.next = NULL;
+ m.pkt.data = (void *)(uintptr_t)buff_addr;
+ m.buf_addr = m.pkt.data;
+ m.buf_physaddr = phys_addr;
+
+ /*
+ * Check if the frame buffer address from guest crosses
+ * sub-region or not. If it does, fall back to copying the
+ * packet (zero copy needs one contiguous physical region).
+ */
+ if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
+ RTE_LOG(ERR, DATA,
+ "(%"PRIu64") Frame buffer address cross "
+ "sub-regioin found when attaching TX frame "
+ "buffer address!\n",
+ dev->device_fh);
+ need_copy = 1;
+ } else
+ need_copy = 0;
+
+ PRINT_PACKET(dev, (uintptr_t)buff_addr, desc->len, 0);
+
+ /*
+ * If this is the first received packet we need to learn
+ * the MAC and setup VMDQ
+ */
+ if (unlikely(dev->ready == DEVICE_MAC_LEARNING)) {
+ if (dev->remove || (link_vmdq(dev, &m) == -1)) {
+ /*
+ * Discard frame if device is scheduled for
+ * removal or a duplicate MAC address is found.
+ */
+ packet_success += free_entries;
+ /* NOTE(review): this adds the already-advanced
+ * packet_success (> free_entries once bumped),
+ * which looks like it may over-advance
+ * last_used_idx — confirm intended behavior. */
+ vq->last_used_idx += packet_success;
+ break;
+ }
+ }
+
+ virtio_tx_route_zcp(dev, &m, head[packet_success], need_copy);
+ packet_success++;
+ }
+}
+
+/*
+ * This function is called by each data core. It handles all RX/TX registered
+ * with the core. For TX the specific lcore linked list is used. For RX, MAC
+ * addresses are compared with all devices in the main linked list.
+ */
+static int
+switch_worker_zcp(__attribute__((unused)) void *arg)
+{
+ struct virtio_net *dev = NULL;
+ struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
+ struct virtio_net_data_ll *dev_ll;
+ struct mbuf_table *tx_q;
+ volatile struct lcore_ll_info *lcore_ll;
+ /* TSC ticks corresponding to BURST_TX_DRAIN_US microseconds. */
+ const uint64_t drain_tsc
+ = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S
+ * BURST_TX_DRAIN_US;
+ uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
+ unsigned ret;
+ const uint16_t lcore_id = rte_lcore_id();
+ uint16_t count_in_ring, rx_count = 0;
+
+ RTE_LOG(INFO, DATA, "Procesing on Core %u started\n", lcore_id);
+
+ lcore_ll = lcore_info[lcore_id].lcore_ll;
+ prev_tsc = 0;
+
+ /* Main poll loop: drain stale TX queues on a timer, then service
+ * RX and TX for every device assigned to this lcore. */
+ while (1) {
+ cur_tsc = rte_rdtsc();
+
+ /* TX burst queue drain */
+ diff_tsc = cur_tsc - prev_tsc;
+ if (unlikely(diff_tsc > drain_tsc)) {
+ /*
+ * Get mbuf from vpool.pool and detach mbuf and
+ * put back into vpool.ring.
+ */
+ dev_ll = lcore_ll->ll_root_used;
+ while ((dev_ll != NULL) && (dev_ll->dev != NULL)) {
+ /* Get virtio device ID */
+ dev = dev_ll->dev;
+
+ if (likely(!dev->remove)) {
+ tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];
+ if (tx_q->len) {
+ LOG_DEBUG(DATA,
+ "TX queue drained after timeout"
+ " with burst size %u\n",
+ tx_q->len);
+
+ /*
+ * Tx any packets in the queue
+ */
+ ret = rte_eth_tx_burst(
+ ports[0],
+ (uint16_t)tx_q->txq_id,
+ (struct rte_mbuf **)
+ tx_q->m_table,
+ (uint16_t)tx_q->len);
+ /* Free anything the port refused. */
+ if (unlikely(ret < tx_q->len)) {
+ do {
+ rte_pktmbuf_free(
+ tx_q->m_table[ret]);
+ } while (++ret < tx_q->len);
+ }
+ tx_q->len = 0;
+
+ txmbuf_clean_zcp(dev,
+ &vpool_array[MAX_QUEUES+dev->vmdq_rx_q]);
+ }
+ }
+ dev_ll = dev_ll->next;
+ }
+ prev_tsc = cur_tsc;
+ }
+
+ rte_prefetch0(lcore_ll->ll_root_used);
+
+ /*
+ * Inform the configuration core that we have exited the linked
+ * list and that no devices are in use if requested.
+ */
+ if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
+ lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
+
+ /* Process devices */
+ dev_ll = lcore_ll->ll_root_used;
+
+ while ((dev_ll != NULL) && (dev_ll->dev != NULL)) {
+ dev = dev_ll->dev;
+ if (unlikely(dev->remove)) {
+ /* Advance first: unlink_vmdq may invalidate
+ * this node's links. */
+ dev_ll = dev_ll->next;
+ unlink_vmdq(dev);
+ dev->ready = DEVICE_SAFE_REMOVE;
+ continue;
+ }
+
+ if (likely(dev->ready == DEVICE_RX)) {
+ uint32_t index = dev->vmdq_rx_q;
+ uint16_t i;
+ count_in_ring
+ = rte_ring_count(vpool_array[index].ring);
+ uint16_t free_entries
+ = (uint16_t)get_available_ring_num_zcp(dev);
+
+ /*
+ * Attach all mbufs in vpool.ring and put back
+ * into vpool.pool, bounded by free guest
+ * descriptors, ring occupancy and burst size.
+ */
+ for (i = 0;
+ i < RTE_MIN(free_entries,
+ RTE_MIN(count_in_ring, MAX_PKT_BURST));
+ i++)
+ attach_rxmbuf_zcp(dev);
+
+ /* Handle guest RX */
+ rx_count = rte_eth_rx_burst(ports[0],
+ (uint16_t)dev->vmdq_rx_q, pkts_burst,
+ MAX_PKT_BURST);
+
+ if (rx_count) {
+ ret_count = virtio_dev_rx_zcp(dev,
+ pkts_burst, rx_count);
+ if (enable_stats) {
+ dev_statistics[dev->device_fh].rx_total
+ += rx_count;
+ dev_statistics[dev->device_fh].rx
+ += ret_count;
+ }
+ /* Detach guest buffers and recycle the
+ * mbufs back onto the vpool ring. */
+ while (likely(rx_count)) {
+ rx_count--;
+ pktmbuf_detach_zcp(
+ pkts_burst[rx_count]);
+ rte_ring_sp_enqueue(
+ vpool_array[index].ring,
+ (void *)pkts_burst[rx_count]);
+ }
+ }
+ }
+
+ if (likely(!dev->remove))
+ /* Handle guest TX */
+ virtio_dev_tx_zcp(dev);
+
+ /* Move to the next device in the list */
+ dev_ll = dev_ll->next;
+ }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Add an entry to a used linked list. A free entry must first be found
+ * in the free linked list using get_data_ll_free_entry();