- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
- "clean is: %d\n",
- dev->device_fh, rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
- "clean is : %d\n",
- dev->device_fh, rte_ring_count(vpool->ring));
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: before updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- vq->last_used_idx += mbuf_count;
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") in txmbuf_clean_zcp: after updated "
- "vq->last_used_idx:%d\n",
- dev->device_fh, vq->last_used_idx);
-
- rte_compiler_barrier();
-
- *(volatile uint16_t *)&vq->used->idx += mbuf_count;
-
- /* Kick guest if required. */
- if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
- eventfd_write((int)vq->kickfd, 1);
-
- return 0;
-}
-
-/*
- * This function is called when a virtio device is destroy.
- * It fetchs mbuf from vpool->pool, and detached it, and put into vpool->ring.
- */
-static void mbuf_destroy_zcp(struct vpool *vpool)
-{
- struct rte_mbuf *mbuf = NULL;
- uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool before "
- "mbuf_destroy_zcp is: %d\n",
- mbuf_count);
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring before "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-
- for (index = 0; index < mbuf_count; index++) {
- mbuf = __rte_mbuf_raw_alloc(vpool->pool);
- if (likely(mbuf != NULL)) {
- if (likely(RTE_MBUF_INDIRECT(mbuf)))
- pktmbuf_detach_zcp(mbuf);
- rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
- }
- }
-
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in mempool after "
- "mbuf_destroy_zcp is: %d\n",
- rte_mempool_count(vpool->pool));
- LOG_DEBUG(VHOST_CONFIG,
- "in mbuf_destroy_zcp: mbuf count in ring after "
- "mbuf_destroy_zcp is : %d\n",
- rte_ring_count(vpool->ring));
-}
-
/*
 * Zero-copy RX path: place a burst of up to MAX_PKT_BURST mbufs (whose
 * payload buffers already alias guest memory) onto the guest's RX
 * virtqueue.  For each packet this writes a zeroed virtio-net header,
 * records the descriptor in the used ring, then publishes the new used
 * index and kicks the guest unless interrupts are suppressed.
 *
 * Returns the number of packets queued (0 when count == 0).
 *
 * NOTE(review): the previous header comment ("This function update the
 * use flag and counter.") did not describe this function -- it looked
 * like a stale copy/paste and has been replaced.
 */
static inline uint32_t __attribute__((always_inline))
virtio_dev_rx_zcp(struct virtio_net *dev, struct rte_mbuf **pkts,
	uint32_t count)
{
	struct vhost_virtqueue *vq;
	struct vring_desc *desc;
	struct rte_mbuf *buff;
	/* The virtio_hdr is initialised to 0. */
	struct virtio_net_hdr_mrg_rxbuf virtio_hdr
		= {{0, 0, 0, 0, 0, 0}, 0};
	uint64_t buff_hdr_addr = 0;
	uint32_t head[MAX_PKT_BURST], packet_len = 0;
	uint32_t head_idx, packet_success = 0;
	uint16_t res_cur_idx;

	/* NOTE(review): the log string says "virtio_dev_rx()" rather than
	 * the _zcp variant; kept byte-identical here since it is runtime
	 * output -- presumably a copy/paste slip in the original. */
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);

	if (count == 0)
		return 0;

	vq = dev->virtqueue[VIRTIO_RXQ];
	/* Clamp the burst to what head[] can hold. */
	count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;

	res_cur_idx = vq->last_used_idx;
	LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
		dev->device_fh, res_cur_idx, res_cur_idx + count);

	/* Retrieve all of the head indexes first to avoid caching issues. */
	for (head_idx = 0; head_idx < count; head_idx++)
		/* The descriptor index was stashed in the mbuf headroom by
		 * the code that mapped the guest buffer into this mbuf. */
		head[head_idx] = MBUF_HEADROOM_UINT32(pkts[head_idx]);

	/*Prefetch descriptor index. */
	rte_prefetch0(&vq->desc[head[packet_success]]);

	while (packet_success != count) {
		/* Get descriptor from available ring */
		desc = &vq->desc[head[packet_success]];

		buff = pkts[packet_success];
		LOG_DEBUG(VHOST_DATA,
			"(%"PRIu64") in dev_rx_zcp: update the used idx for "
			"pkt[%d] descriptor idx: %d\n",
			dev->device_fh, packet_success,
			MBUF_HEADROOM_UINT32(buff));

		PRINT_PACKET(dev,
			(uintptr_t)(((uint64_t)(uintptr_t)buff->buf_addr)
			+ RTE_PKTMBUF_HEADROOM),
			rte_pktmbuf_data_len(buff), 0);

		/* Buffer address translation for virtio header. */
		buff_hdr_addr = gpa_to_vva(dev, desc->addr);
		/* Total bytes exposed to the guest: payload + virtio header. */
		packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;

		/*
		 * If the descriptors are chained the header and data are
		 * placed in separate buffers.
		 */
		if (desc->flags & VRING_DESC_F_NEXT) {
			/* First descriptor carries only the virtio header;
			 * the chained one carries the packet payload. */
			desc->len = vq->vhost_hlen;
			desc = &vq->desc[desc->next];
			desc->len = rte_pktmbuf_data_len(buff);
		} else {
			/* Single descriptor holds header + data back-to-back. */
			desc->len = packet_len;
		}

		/* Update used ring with desc information.  The mask assumes
		 * vq->size is a power of two, as the virtio ring layout
		 * requires. */
		vq->used->ring[res_cur_idx & (vq->size - 1)].id
			= head[packet_success];
		vq->used->ring[res_cur_idx & (vq->size - 1)].len
			= packet_len;
		res_cur_idx++;
		packet_success++;

		/* A header is required per buffer. */
		rte_memcpy((void *)(uintptr_t)buff_hdr_addr,
			(const void *)&virtio_hdr, vq->vhost_hlen);

		PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);

		if (likely(packet_success < count)) {
			/* Prefetch descriptor index. */
			rte_prefetch0(&vq->desc[head[packet_success]]);
		}
	}

	/* Ensure all used-ring entries are visible before the index is
	 * published to the guest. */
	rte_compiler_barrier();

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in dev_rx_zcp: before update used idx: "
		"vq.last_used_idx: %d, vq->used->idx: %d\n",
		dev->device_fh, vq->last_used_idx, vq->used->idx);

	/* Volatile store so the index update is a single real write the
	 * guest can observe. */
	*(volatile uint16_t *)&vq->used->idx += count;
	vq->last_used_idx += count;

	LOG_DEBUG(VHOST_DATA,
		"(%"PRIu64") in dev_rx_zcp: after update used idx: "
		"vq.last_used_idx: %d, vq->used->idx: %d\n",
		dev->device_fh, vq->last_used_idx, vq->used->idx);

	/* Kick the guest if necessary. */
	if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
		eventfd_write((int)vq->kickfd, 1);

	return count;
}
-
-/*
- * This function routes the TX packet to the correct interface.
- * This may be a local device or the physical port.
- */
-static inline void __attribute__((always_inline))
-virtio_tx_route_zcp(struct virtio_net *dev, struct rte_mbuf *m,
- uint32_t desc_idx, uint8_t need_copy)
-{
- struct mbuf_table *tx_q;
- struct rte_mbuf **m_table;
- struct rte_mbuf *mbuf = NULL;
- unsigned len, ret, offset = 0;
- struct vpool *vpool;
- struct virtio_net_data_ll *dev_ll = ll_root_used;
- struct ether_hdr *pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
- uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
- uint16_t vmdq_rx_q = ((struct vhost_dev *)dev->priv)->vmdq_rx_q;
-
- /*Add packet to the port tx queue*/
- tx_q = &tx_queue_zcp[vmdq_rx_q];
- len = tx_q->len;
-
- /* Allocate an mbuf and populate the structure. */
- vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q];
- rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
- if (unlikely(mbuf == NULL)) {
- struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- RTE_LOG(ERR, VHOST_DATA,
- "(%"PRIu64") Failed to allocate memory for mbuf.\n",
- dev->device_fh);
- put_desc_to_used_list_zcp(vq, desc_idx);
- return;
- }
-
- if (vm2vm_mode == VM2VM_HARDWARE) {
- /* Avoid using a vlan tag from any vm for external pkt, such as
- * vlan_tags[dev->device_fh], oterwise, it conflicts when pool
- * selection, MAC address determines it as an external pkt
- * which should go to network, while vlan tag determine it as
- * a vm2vm pkt should forward to another vm. Hardware confuse
- * such a ambiguous situation, so pkt will lost.
- */
- vlan_tag = external_pkt_default_vlan_tag;
- while (dev_ll != NULL) {
- if (likely(dev_ll->vdev->ready == DEVICE_RX) &&
- ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->vdev->mac_address)) {
-
- /*
- * Drop the packet if the TX packet is destined
- * for the TX device.
- */
- if (unlikely(dev_ll->vdev->dev->device_fh
- == dev->device_fh)) {
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: Source and destination"
- "MAC addresses are the same. Dropping "
- "packet.\n",
- dev_ll->vdev->dev->device_fh);
- MBUF_HEADROOM_UINT32(mbuf)
- = (uint32_t)desc_idx;
- __rte_mbuf_raw_free(mbuf);
- return;
- }
-
- /*
- * Packet length offset 4 bytes for HW vlan
- * strip when L2 switch back.
- */
- offset = 4;
- vlan_tag =
- (uint16_t)
- vlan_tags[(uint16_t)dev_ll->vdev->dev->device_fh];
-
- LOG_DEBUG(VHOST_DATA,
- "(%"PRIu64") TX: pkt to local VM device id:"
- "(%"PRIu64") vlan tag: %d.\n",
- dev->device_fh, dev_ll->vdev->dev->device_fh,
- vlan_tag);