-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2015 Intel Corporation
*/
#include <sys/queue.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
vmxnet3_cmd_ring_release(&tq->cmd_ring);
/* Release the memzone */
rte_memzone_free(tq->mz);
+ /* Release the queue */
+ rte_free(tq);
}
}
/* Release the memzone */
rte_memzone_free(rq->mz);
+
+ /* Release the queue */
+ rte_free(rq);
}
}
struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
int size;
- if (rq != NULL) {
- /* Release both the cmd_rings mbufs */
- for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
- vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
- }
+ /* Release both the cmd_rings mbufs */
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+ vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
ring0 = &rq->cmd_ring[0];
ring1 = &rq->cmd_ring[1];
*/
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
if (copy_size) {
- uint64 offset = txq->cmd_ring.next2fill *
- txq->txdata_desc_size;
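+ /* Do the offset arithmetic in 64 bits to match the 64-bit basePA
+  * addition below.
+  */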
+ uint64 offset =
+ (uint64)txq->cmd_ring.next2fill *
+ txq->txdata_desc_size;
gdesc->txd.addr =
rte_cpu_to_le_64(txq->data_ring.basePA +
offset);
} else {
- gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
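+ /* No data-ring copy: point the descriptor at the mbuf data's IOVA */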
+ gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
}
gdesc->dword[2] = dw2 | m_seg->data_len;
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
struct rte_mbuf *mbuf)
{
- uint32_t val = 0;
+ uint32_t val;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
struct Vmxnet3_RxDesc *rxd =
(struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
- if (ring_id == 0)
+ if (ring_id == 0) {
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
+
+ /* We use a single buffer per packet, so all descriptors here are HEAD type */
val = VMXNET3_RXD_BTYPE_HEAD;
- else
+ } else {
+ /* All BODY type buffers for 2nd ring */
val = VMXNET3_RXD_BTYPE_BODY;
+ }
+ /*
+ * Load the mbuf pointer into buf_info[ring->next2fill];
+ * the buf_info entry is the equivalent of a virtio-virtqueue cookie
+ */
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
+ /* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
+
+ /* After this point rxd->addr MUST not be NULL */
rxd->btype = val;
rxd->len = buf_info->len;
+ /* Flip gen bit at the end to change ownership */
rxd->gen = ring->gen;
vmxnet3_cmd_ring_adv_next2fill(ring);
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
int err = 0;
- uint32_t i = 0, val = 0;
+ uint32_t i = 0;
struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
- if (ring_id == 0) {
- /* Usually: One HEAD type buf per packet
- * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
- * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
- */
-
- /* We use single packet buffer so all heads here */
- val = VMXNET3_RXD_BTYPE_HEAD;
- } else {
- /* All BODY type buffers for 2nd ring */
- val = VMXNET3_RXD_BTYPE_BODY;
- }
-
while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
- struct Vmxnet3_RxDesc *rxd;
struct rte_mbuf *mbuf;
- vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
-
- rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
/* Allocate blank mbuf for the current Rx Descriptor */
mbuf = rte_mbuf_raw_alloc(rxq->mp);
break;
}
- /*
- * Load mbuf pointer into buf_info[ring_size]
- * buf_info structure is equivalent to cookie for virtio-virtqueue
- */
- buf_info->m = mbuf;
- buf_info->len = (uint16_t)(mbuf->buf_len -
- RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
-
- /* Load Rx Descriptor with the buffer's GPA */
- rxd->addr = buf_info->bufPA;
-
- /* After this point rxd->addr MUST not be NULL */
- rxd->btype = val;
- rxd->len = buf_info->len;
- /* Flip gen bit at the end to change ownership */
- rxd->gen = ring->gen;
-
- vmxnet3_cmd_ring_adv_next2fill(ring);
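+ /* Let vmxnet3_renew_desc() do the shared descriptor setup */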
+ vmxnet3_renew_desc(rxq, ring_id, mbuf);
i++;
}
return i;
}
-
-/* Receive side checksum and other offloads */
-static void
-vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
+/* The MSS is not provided by vmxnet3, so guess one from the available information */
+static uint16_t
+vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm)
{
- /* Check for RSS */
- if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
- rxm->ol_flags |= PKT_RX_RSS_HASH;
- rxm->hash.rss = rcd->rssHash;
- }
+ uint32_t hlen, slen;
+ struct ipv4_hdr *ipv4_hdr;
+ struct ipv6_hdr *ipv6_hdr;
+ struct tcp_hdr *tcp_hdr;
+ char *ptr;
+
+ RTE_ASSERT(rcd->tcp);
+
+ ptr = rte_pktmbuf_mtod(rxm, char *);
+ slen = rte_pktmbuf_data_len(rxm);
+ hlen = sizeof(struct ether_hdr);
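+
+ /* Walk the L2/L3/L4 headers; whenever the first segment is too
+  * short to parse, fall back to an MTU-based estimate.
+  */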
- /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
if (rcd->v4) {
- struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
- struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);
+ if (unlikely(slen < hlen + sizeof(struct ipv4_hdr)))
+ return hw->mtu - sizeof(struct ipv4_hdr)
+ - sizeof(struct tcp_hdr);
+
+ ipv4_hdr = (struct ipv4_hdr *)(ptr + hlen);
+ hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
+ IPV4_IHL_MULTIPLIER;
+ } else if (rcd->v6) {
+ if (unlikely(slen < hlen + sizeof(struct ipv6_hdr)))
+ return hw->mtu - sizeof(struct ipv6_hdr) -
+ sizeof(struct tcp_hdr);
+
+ ipv6_hdr = (struct ipv6_hdr *)(ptr + hlen);
+ hlen += sizeof(struct ipv6_hdr);
+ if (unlikely(ipv6_hdr->proto != IPPROTO_TCP)) {
+ int frag;
+
+ rte_net_skip_ip6_ext(ipv6_hdr->proto, rxm,
+ &hlen, &frag);
+ }
+ }
+
+ if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
+ return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+ sizeof(struct ether_hdr);
+
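+ /* The upper nibble of data_off is the TCP header length in 32-bit
+  * words; (data_off & 0xf0) >> 2 converts it straight to bytes.
+  */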
+ tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+ hlen += (tcp_hdr->data_off & 0xf0) >> 2;
+
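+ /* udata64 holds the LRO segment count (set in vmxnet3_rx_offload);
+  * when known, MSS is the TCP payload length per segment, rounded up.
+  */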
+ if (rxm->udata64 > 1)
+ return (rte_pktmbuf_pkt_len(rxm) - hlen +
+ rxm->udata64 - 1) / rxm->udata64;
+ else
+ return hw->mtu - hlen + sizeof(struct ether_hdr);
+}
+
+/* Receive side checksum and other offloads */
+static inline void
+vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
+ struct rte_mbuf *rxm, const uint8_t sop)
+{
+ uint64_t ol_flags = rxm->ol_flags;
+ uint32_t packet_type = rxm->packet_type;
+
+ /* Offloads set in sop */
+ if (sop) {
+ /* Set packet type */
+ packet_type |= RTE_PTYPE_L2_ETHER;
+
+ /* Check large packet receive */
+ if (VMXNET3_VERSION_GE_2(hw) &&
+ rcd->type == VMXNET3_CDTYPE_RXCOMP_LRO) {
+ const Vmxnet3_RxCompDescExt *rcde =
+ (const Vmxnet3_RxCompDescExt *)rcd;
+
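+ /* Record MSS and segment count; segCnt (stashed in udata64)
+  * feeds the MSS guess in the eop pass.
+  */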
+ rxm->tso_segsz = rcde->mss;
+ rxm->udata64 = rcde->segCnt;
+ ol_flags |= PKT_RX_LRO;
+ }
+ } else { /* Offloads set in eop */
+ /* Check for RSS */
+ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
+ ol_flags |= PKT_RX_RSS_HASH;
+ rxm->hash.rss = rcd->rssHash;
+ }
- if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
- rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
- else
- rxm->packet_type = RTE_PTYPE_L3_IPV4;
+ /* Check for hardware stripped VLAN tag */
+ if (rcd->ts) {
+ ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+ }
- if (!rcd->cnc) {
- if (!rcd->ipc)
- rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ /* Check packet type, checksum errors, etc. */
+ if (rcd->cnc) {
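+ /* cnc: checksum not calculated by the device */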
+ ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else {
+ if (rcd->v4) {
+ packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
+
+ if (rcd->ipc)
+ ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ else
+ ol_flags |= PKT_RX_IP_CKSUM_BAD;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else if (rcd->v6) {
+ packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
+
+ if (rcd->tuc) {
+ ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (rcd->tcp)
+ packet_type |= RTE_PTYPE_L4_TCP;
+ else
+ packet_type |= RTE_PTYPE_L4_UDP;
+ } else {
+ if (rcd->tcp) {
+ packet_type |= RTE_PTYPE_L4_TCP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ } else if (rcd->udp) {
+ packet_type |= RTE_PTYPE_L4_UDP;
+ ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ } else {
+ packet_type |= RTE_PTYPE_UNKNOWN;
+ }
- if ((rcd->tcp || rcd->udp) && !rcd->tuc)
- rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ /* Old variants of vmxnet3 do not provide MSS */
+ if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ rxm->tso_segsz = vmxnet3_guess_mss(hw,
+ rcd, rxm);
}
}
+
+ rxm->ol_flags = ol_flags;
+ rxm->packet_type = packet_type;
}
/*
rxm->data_off = RTE_PKTMBUF_HEADROOM;
rxm->ol_flags = 0;
rxm->vlan_tci = 0;
+ rxm->packet_type = 0;
/*
* If this is the first buffer of the received packet,
}
rxq->start_seg = rxm;
- vmxnet3_rx_offload(rcd, rxm);
+ vmxnet3_rx_offload(hw, rcd, rxm, 1);
} else {
struct rte_mbuf *start = rxq->start_seg;
if (rcd->eop) {
struct rte_mbuf *start = rxq->start_seg;
- /* Check for hardware stripped VLAN tag */
- if (rcd->ts) {
- start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
- start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
- }
-
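+ /* eop: apply RSS, VLAN, and checksum offload flags on the first mbuf */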
+ vmxnet3_rx_offload(hw, rcd, start, 0);
rx_pkts[nb_rx++] = start;
rxq->start_seg = NULL;
}
}
}
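+ /* If this burst returned nothing, the rings may have run out of
+  * mbufs; replenish them and, if required, publish the new producer
+  * index to the device.
+  */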
+ if (unlikely(nb_rxd == 0)) {
+ uint32_t avail;
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
+ if (unlikely(avail > 0)) {
+ /* Try to allocate new mbufs and renew the descriptors */
+ vmxnet3_post_rx_bufs(rxq, ring_idx);
+ }
+ }
+ if (unlikely(rxq->shared->ctrl.updateRxProd)) {
+ for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
+ VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
+ rxq->cmd_ring[ring_idx].next2fill);
+ }
+ }
+ }
+
return nb_rx;
}
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
- txq->shared = &hw->tqd_start[queue_idx];
+ txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
txq->hw = hw;
txq->qid = queue_idx;
txq->stopped = TRUE;
/* cmd_ring initialization */
ring->base = mz->addr;
- ring->basePA = mz->phys_addr;
+ ring->basePA = mz->iova;
/* comp_ring initialization */
comp_ring->base = ring->base + ring->size;
rxq->mp = mp;
rxq->queue_id = queue_idx;
rxq->port_id = dev->data->port_id;
- rxq->shared = &hw->rqd_start[queue_idx];
+ rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
rxq->hw = hw;
rxq->qid1 = queue_idx;
rxq->qid2 = queue_idx + hw->num_rx_queues;
/* cmd_ring0 initialization */
ring0->base = mz->addr;
- ring0->basePA = mz->phys_addr;
+ ring0->basePA = mz->iova;
/* cmd_ring1 initialization */
ring1->base = ring0->base + ring0->size;