/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>
#include <rte_errno.h>

#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"

#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
#define VMXNET3_TX_OFFLOAD_MASK ( \

#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
    (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
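/*
 * Note: VMXNET3_TX_OFFLOAD_MASK collects the PKT_TX_* flags this PMD can
 * honour on transmit; VMXNET3_TX_OFFLOAD_NOTSUP_MASK is simply its
 * complement within PKT_TX_OFFLOAD_MASK. vmxnet3_prep_pkts() below tests
 * each mbuf's ol_flags against the NOTSUP mask so that unsupported
 * offload requests are rejected before any descriptors are built.
 */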
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};

static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif

#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)

    PMD_RX_LOG(DEBUG,
               "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
               rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
    PMD_RX_LOG(DEBUG,
               "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
               (unsigned long)rxq->cmd_ring[0].basePA,
               (unsigned long)rxq->cmd_ring[1].basePA,
               (unsigned long)rxq->comp_ring.basePA);

    avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
    PMD_RX_LOG(DEBUG,
               "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
               (uint32_t)rxq->cmd_ring[0].size, avail,
               rxq->comp_ring.next2proc,
               rxq->cmd_ring[0].size - avail);

    avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
    PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
               (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
               rxq->cmd_ring[1].size - avail);

vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)

    PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
               txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
    PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
               (unsigned long)txq->cmd_ring.basePA,
               (unsigned long)txq->comp_ring.basePA,
               (unsigned long)txq->data_ring.basePA);

    avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
    PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
               (uint32_t)txq->cmd_ring.size, avail,
               txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)

    while (ring->next2comp != ring->next2fill) {
        /* No need to worry about desc ownership, device is quiesced by now. */
        vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

            rte_pktmbuf_free(buf_info->m);
        vmxnet3_cmd_ring_adv_next2comp(ring);

vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)

    for (i = 0; i < ring->size; i++) {
        /* No need to worry about desc ownership, device is quiesced by now. */
        vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];

            rte_pktmbuf_free_seg(buf_info->m);
        vmxnet3_cmd_ring_adv_next2comp(ring);

vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)

    rte_free(ring->buf_info);
    ring->buf_info = NULL;

vmxnet3_dev_tx_queue_release(void *txq)

    vmxnet3_tx_queue_t *tq = txq;

        vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
        /* Release the cmd_ring */
        vmxnet3_cmd_ring_release(&tq->cmd_ring);
        /* Release the memzone */
        rte_memzone_free(tq->mz);

vmxnet3_dev_rx_queue_release(void *rxq)

    vmxnet3_rx_queue_t *rq = rxq;

        for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
            vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

        /* Release both the cmd_rings */
        for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
            vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);

        /* Release the memzone */
        rte_memzone_free(rq->mz);
vmxnet3_dev_tx_queue_reset(void *txq)

    vmxnet3_tx_queue_t *tq = txq;
    struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
    struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
    struct vmxnet3_data_ring *data_ring = &tq->data_ring;

        /* Release the cmd_ring mbufs */
        vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);

    /* Tx vmxnet rings structure initialization */
    ring->gen = VMXNET3_INIT_GEN;
    comp_ring->next2proc = 0;
    comp_ring->gen = VMXNET3_INIT_GEN;

    size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
    size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
    size += tq->txdata_desc_size * data_ring->size;

    memset(ring->base, 0, size);
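    /*
     * Note: the single memset() above is enough because the Tx command
     * ring, completion ring and data ring are laid out back to back in
     * one memzone (see vmxnet3_dev_tx_queue_setup() below), so
     * ring->base plus the summed sizes covers the queue's whole
     * descriptor area.
     */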
vmxnet3_dev_rx_queue_reset(void *rxq)

    vmxnet3_rx_queue_t *rq = rxq;
    struct vmxnet3_hw *hw = rq->hw;
    struct vmxnet3_cmd_ring *ring0, *ring1;
    struct vmxnet3_comp_ring *comp_ring;
    struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;

    /* Release both the cmd_rings mbufs */
    for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
        vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

    ring0 = &rq->cmd_ring[0];
    ring1 = &rq->cmd_ring[1];
    comp_ring = &rq->comp_ring;

    /* Rx vmxnet rings structure initialization */
    ring0->next2fill = 0;
    ring1->next2fill = 0;
    ring0->next2comp = 0;
    ring1->next2comp = 0;
    ring0->gen = VMXNET3_INIT_GEN;
    ring1->gen = VMXNET3_INIT_GEN;
    comp_ring->next2proc = 0;
    comp_ring->gen = VMXNET3_INIT_GEN;

    size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
    size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
    if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
        size += rq->data_desc_size * data_ring->size;

    memset(ring0->base, 0, size);

vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)

    PMD_INIT_FUNC_TRACE();

    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

            vmxnet3_dev_tx_queue_reset(txq);

    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

            vmxnet3_dev_rx_queue_reset(rxq);
vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)

    struct rte_mbuf *mbuf;

    /* Release cmd_ring descriptor and free mbuf */
    RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);

    mbuf = txq->cmd_ring.buf_info[eop_idx].m;
    if (mbuf == NULL)
        rte_panic("EOP desc does not point to a valid mbuf");
    rte_pktmbuf_free(mbuf);

    txq->cmd_ring.buf_info[eop_idx].m = NULL;

    while (txq->cmd_ring.next2comp != eop_idx) {
        /* no out-of-order completion */
        RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
        vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

    /* Mark the txd for which tcd was generated as completed */
    vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

    return completed + 1;
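/*
 * Completion handling relies on the generation bit: a Tx completion
 * descriptor belongs to the driver only while tcd->gen matches the
 * completion ring's current gen value, which toggles every time the
 * ring wraps. For each completion entry, vmxnet3_unmap_pkt() releases
 * every command descriptor up to and including the reported EOP
 * descriptor and frees the associated mbuf chain.
 */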
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)

    vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
    struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
        (comp_ring->base + comp_ring->next2proc);

    while (tcd->gen == comp_ring->gen) {
        completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

        vmxnet3_comp_ring_adv_next2proc(comp_ring);
        tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
                                            comp_ring->next2proc);

    PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
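/*
 * vmxnet3_prep_pkts() backs the rte_eth_tx_prepare() path: it rejects
 * packets whose offload flags fall within VMXNET3_TX_OFFLOAD_NOTSUP_MASK
 * or that would need more than VMXNET3_MAX_TXD_PER_PKT descriptors
 * without TSO, and it lets rte_net_intel_cksum_prepare() set up the
 * pseudo-header checksums the device expects. A typical caller-side
 * pairing looks roughly like this (illustrative sketch only, not part
 * of this driver; handle_reject() is a hypothetical application helper):
 *
 *	uint16_t n = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (n != nb_pkts)
 *		handle_reject(pkts[n], rte_errno);
 *	n = rte_eth_tx_burst(port_id, queue_id, pkts, n);
 */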
vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,

    for (i = 0; i != nb_pkts; i++) {
        ol_flags = m->ol_flags;

        /* Non-TSO packet cannot occupy more than
         * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
         */
        if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
            m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {

        /* check that only supported TX offloads are requested. */
        if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
            (ol_flags & PKT_TX_L4_MASK) ==
            PKT_TX_SCTP_CKSUM) {
            rte_errno = ENOTSUP;

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
        ret = rte_validate_tx_offload(m);

        ret = rte_net_intel_cksum_prepare(m);
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,

    vmxnet3_tx_queue_t *txq = tx_queue;
    struct vmxnet3_hw *hw = txq->hw;
    Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
    uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);

    if (unlikely(txq->stopped)) {
        PMD_TX_LOG(DEBUG, "Tx queue is stopped.");

    /* Free up the comp_descriptors aggressively */
    vmxnet3_tq_tx_complete(txq);

    while (nb_tx < nb_pkts) {
        Vmxnet3_GenericDesc *gdesc;
        vmxnet3_buf_info_t *tbi;
        uint32_t first2fill, avail, dw2;
        struct rte_mbuf *txm = tx_pkts[nb_tx];
        struct rte_mbuf *m_seg = txm;

        bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
        /* # of descriptors needed for a packet. */
        unsigned count = txm->nb_segs;

        avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);

        /* Is command ring full? */
        if (unlikely(avail == 0)) {
            PMD_TX_LOG(DEBUG, "No free ring descriptors");
            txq->stats.tx_ring_full++;
            txq->stats.drop_total += (nb_pkts - nb_tx);

            /* Command ring is not full but cannot handle the
             * multi-segmented packet. Let's try the next packet
             */
            PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
                       "(avail %d needed %d)", avail, count);
            txq->stats.drop_total++;
                txq->stats.drop_tso++;
            rte_pktmbuf_free(txm);

        /* Drop non-TSO packet that is excessively fragmented */
        if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
            PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
                       "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
            txq->stats.drop_too_many_segs++;
            txq->stats.drop_total++;
            rte_pktmbuf_free(txm);

        if (txm->nb_segs == 1 &&
            rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
            struct Vmxnet3_TxDataDesc *tdd;

            tdd = (struct Vmxnet3_TxDataDesc *)
                ((uint8 *)txq->data_ring.base +
                 txq->cmd_ring.next2fill *
                 txq->txdata_desc_size);
            copy_size = rte_pktmbuf_pkt_len(txm);
            rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);

        /* use the previous gen bit for the SOP desc */
        dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
        first2fill = txq->cmd_ring.next2fill;

            /* Remember the transmit buffer for cleanup */
            tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;

            /* NB: the following assumes that VMXNET3 maximum
             * transmit buffer size (16K) is greater than
             * the maximum mbuf segment size.
             */
            gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;

                uint64 offset = txq->cmd_ring.next2fill *
                                txq->txdata_desc_size;
                rte_cpu_to_le_64(txq->data_ring.basePA +
                gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);

            gdesc->dword[2] = dw2 | m_seg->data_len;

            /* move to the next2fill descriptor */
            vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

            /* use the right gen for non-SOP desc */
            dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        } while ((m_seg = m_seg->next) != NULL);

        /* set the last buf_info for the pkt */

        /* Update the EOP descriptor */
        gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

        /* Add VLAN tag if present */
        gdesc = txq->cmd_ring.base + first2fill;
        if (txm->ol_flags & PKT_TX_VLAN_PKT) {
            gdesc->txd.tci = txm->vlan_tci;

            uint16_t mss = txm->tso_segsz;

            gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
            gdesc->txd.om = VMXNET3_OM_TSO;
            gdesc->txd.msscof = mss;

            deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
        } else if (txm->ol_flags & PKT_TX_L4_MASK) {
            gdesc->txd.om = VMXNET3_OM_CSUM;
            gdesc->txd.hlen = txm->l2_len + txm->l3_len;

            switch (txm->ol_flags & PKT_TX_L4_MASK) {
            case PKT_TX_TCP_CKSUM:
                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
                break;
            case PKT_TX_UDP_CKSUM:
                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
                break;
            default:
                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
                           txm->ol_flags & PKT_TX_L4_MASK);

            gdesc->txd.om = VMXNET3_OM_NONE;
            gdesc->txd.msscof = 0;

        /* flip the GEN bit on the SOP */
        rte_compiler_barrier();
        gdesc->dword[2] ^= VMXNET3_TXD_GEN;
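        /*
         * The SOP descriptor was written with the inverted generation
         * bit (cmd_ring.gen ^ 0x1), so the device treats the packet as
         * not yet owned while the remaining descriptors are filled in.
         * The compiler barrier keeps the compiler from reordering those
         * stores past this final XOR, which flips the SOP gen bit and
         * hands the whole descriptor chain over to the device at once.
         */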
        txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);

    PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));

    if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
        txq_ctrl->txNumDeferred = 0;
        /* Notify vSwitch that packets are available. */
        VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
                               txq->cmd_ring.next2fill);
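    /*
     * The TXPROD doorbell is deliberately batched: the running count of
     * deferred packets is published in the shared queue control area and
     * the BAR0 register is only written once it reaches the txThreshold
     * value advertised by the device, keeping the number of register
     * writes (relatively expensive on an emulated device) per burst low.
     */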
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
                   struct rte_mbuf *mbuf)

    struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
    struct Vmxnet3_RxDesc *rxd =
        (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
    vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

        /* Usually: One HEAD type buf per packet
         * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
         *       VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
         */

        /* We use single packet buffer so all heads here */
        val = VMXNET3_RXD_BTYPE_HEAD;

        /* All BODY type buffers for 2nd ring */
        val = VMXNET3_RXD_BTYPE_BODY;

    /*
     * Load mbuf pointer into buf_info[ring_size]
     * buf_info structure is equivalent to cookie for virtio-virtqueue
     */
    buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
    buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);

    /* Load Rx Descriptor with the buffer's GPA */
    rxd->addr = buf_info->bufPA;

    /* After this point rxd->addr MUST not be NULL */
    rxd->len = buf_info->len;
    /* Flip gen bit at the end to change ownership */
    rxd->gen = ring->gen;

    vmxnet3_cmd_ring_adv_next2fill(ring);

/*
 * Allocates mbufs and clusters. Post rx descriptors with buffer details
 * so that device can receive packets in those buffers.
 *
 * Among the two rings, the 1st ring contains buffers of type 0 and type 1.
 * bufs_per_pkt is set such that for non-LRO cases all the buffers required
 * by a frame will fit in the 1st ring (1st buf of type 0 and rest of type 1).
 * The 2nd ring contains buffers of type 1 alone and is mostly used for LRO.
 */
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)

    struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

    while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
        struct rte_mbuf *mbuf;

        /* Allocate blank mbuf for the current Rx Descriptor */
        mbuf = rte_mbuf_raw_alloc(rxq->mp);
        if (unlikely(mbuf == NULL)) {
            PMD_RX_LOG(ERR, "Error allocating mbuf");
            rxq->stats.rx_buf_alloc_failure++;

        vmxnet3_renew_desc(rxq, ring_id, mbuf);

    /* Return error only if no buffers are posted at present */
    if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
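    /*
     * i.e. the function reports failure only when not a single buffer
     * could be posted and the ring is still essentially empty; a partial
     * refill after an mbuf allocation failure is treated as success so
     * that reception can continue with whatever was posted.
     */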
/* Receive side checksum and other offloads */
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)

    if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
        rxm->ol_flags |= PKT_RX_RSS_HASH;
        rxm->hash.rss = rcd->rssHash;

    /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
        struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
        struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);

        if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
            rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
        else
            rxm->packet_type = RTE_PTYPE_L3_IPV4;

                rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;

            if ((rcd->tcp || rcd->udp) && !rcd->tuc)
                rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
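    /*
     * Summary of the mapping done above: the RSS hash from the
     * completion descriptor is exported via PKT_RX_RSS_HASH, the IPv4
     * header length decides between RTE_PTYPE_L3_IPV4 and
     * RTE_PTYPE_L3_IPV4_EXT, and the descriptor's checksum status bits
     * are translated into PKT_RX_IP_CKSUM_BAD / PKT_RX_L4_CKSUM_BAD so
     * applications never have to read device-specific fields.
     */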
/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)

    uint32_t nb_rxd, idx;
    vmxnet3_rx_queue_t *rxq;
    Vmxnet3_RxCompDesc *rcd;
    vmxnet3_buf_info_t *rbi;
    struct rte_mbuf *rxm = NULL;
    struct vmxnet3_hw *hw;

    rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

    if (unlikely(rxq->stopped)) {
        PMD_RX_LOG(DEBUG, "Rx queue is stopped.");

    while (rcd->gen == rxq->comp_ring.gen) {
        struct rte_mbuf *newm;

        if (nb_rx >= nb_pkts)
            break;

        newm = rte_mbuf_raw_alloc(rxq->mp);
        if (unlikely(newm == NULL)) {
            PMD_RX_LOG(ERR, "Error allocating mbuf");
            rxq->stats.rx_buf_alloc_failure++;

        ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
        rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
        RTE_SET_USED(rxd); /* used only for assert when enabled */
        rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

        PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

        RTE_ASSERT(rcd->len <= rxd->len);

        /* Get the packet buffer pointer from buf_info */

        /* Clear descriptor associated buf_info to be reused */

        /* Update the index that we received a packet */
        rxq->cmd_ring[ring_idx].next2comp = idx;

        /* For RCD with EOP set, check if there is frame error */
        if (unlikely(rcd->eop && rcd->err)) {
            rxq->stats.drop_total++;
            rxq->stats.drop_err++;

                rxq->stats.drop_fcs++;
                PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");

            PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
                       (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                             rxq->comp_ring.base), rcd->rxdIdx);
            rte_pktmbuf_free_seg(rxm);
            if (rxq->start_seg) {
                struct rte_mbuf *start = rxq->start_seg;

                rxq->start_seg = NULL;
                rte_pktmbuf_free(start);

        /* Initialize newly received packet buffer */
        rxm->port = rxq->port_id;
        rxm->pkt_len = (uint16_t)rcd->len;
        rxm->data_len = (uint16_t)rcd->len;
        rxm->data_off = RTE_PKTMBUF_HEADROOM;

        /*
         * If this is the first buffer of the received packet,
         * set the pointer to the first mbuf of the packet
         * Otherwise, update the total length and the number of segments
         * of the current scattered packet, and update the pointer to
         * the last mbuf of the current packet.
         */
            RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

            if (unlikely(rcd->len == 0)) {
                RTE_ASSERT(rcd->eop);
817 "Rx buf was skipped. rxring[%d][%d])",
                rte_pktmbuf_free_seg(rxm);

            if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
                uint8_t *rdd = rxq->data_ring.base +
                    idx * rxq->data_desc_size;

                RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
                rte_memcpy(rte_pktmbuf_mtod(rxm, char *),

            rxq->start_seg = rxm;
            vmxnet3_rx_offload(rcd, rxm);

            struct rte_mbuf *start = rxq->start_seg;

            RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);

            start->pkt_len += rxm->data_len;

            rxq->last_seg->next = rxm;

            struct rte_mbuf *start = rxq->start_seg;

            /* Check for hardware stripped VLAN tag */
                start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
                start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);

            rx_pkts[nb_rx++] = start;
            rxq->start_seg = NULL;

        rxq->cmd_ring[ring_idx].next2comp = idx;
        VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
                                  rxq->cmd_ring[ring_idx].size);

        /* It's time to renew descriptors */
        vmxnet3_renew_desc(rxq, ring_idx, newm);
        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
            VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                   rxq->cmd_ring[ring_idx].next2fill);

        /* Advance to the next descriptor in comp_ring */
        vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

        rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

        if (nb_rxd > rxq->cmd_ring[0].size) {
            PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
                       " relinquish control.");

    if (unlikely(nb_rxd == 0)) {

        for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
            avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
            if (unlikely(avail > 0)) {
                /* try to alloc new buf and renew descriptors */
                vmxnet3_post_rx_bufs(rxq, ring_idx);

        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
            for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                       rxq->cmd_ring[ring_idx].next2fill);
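    /*
     * The nb_rxd == 0 path above matters for recovery: if a previous
     * burst could not refill the command rings (for example because the
     * mempool was empty), reception would otherwise stall with the
     * device owning no descriptors. Re-posting buffers and re-writing
     * RXPROD here lets the queue recover once mbufs become available.
     */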
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           unsigned int socket_id,
                           const struct rte_eth_txconf *tx_conf)

    struct vmxnet3_hw *hw = dev->data->dev_private;
    const struct rte_memzone *mz;
    struct vmxnet3_tx_queue *txq;
    struct vmxnet3_cmd_ring *ring;
    struct vmxnet3_comp_ring *comp_ring;
    struct vmxnet3_data_ring *data_ring;

    PMD_INIT_FUNC_TRACE();

    if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
        ETH_TXQ_FLAGS_NOXSUMSCTP) {
        PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");

    txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
                      RTE_CACHE_LINE_SIZE);
        PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");

    txq->queue_id = queue_idx;
    txq->port_id = dev->data->port_id;
    txq->shared = &hw->tqd_start[queue_idx];
    txq->qid = queue_idx;
    txq->txdata_desc_size = hw->txdata_desc_size;

    ring = &txq->cmd_ring;
    comp_ring = &txq->comp_ring;
    data_ring = &txq->data_ring;

    /* Tx vmxnet ring length should be between 512-4096 */
    if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
        PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
                     VMXNET3_DEF_TX_RING_SIZE);
    } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
        PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
                     VMXNET3_TX_RING_MAX_SIZE);

        ring->size = nb_desc;
        ring->size &= ~VMXNET3_RING_SIZE_MASK;

    comp_ring->size = data_ring->size = ring->size;

    /* Tx vmxnet rings structure initialization */
    ring->gen = VMXNET3_INIT_GEN;
    comp_ring->next2proc = 0;
    comp_ring->gen = VMXNET3_INIT_GEN;

    size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
    size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
    size += txq->txdata_desc_size * data_ring->size;

    mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
                                  VMXNET3_RING_BA_ALIGN, socket_id);
        PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");

    memset(mz->addr, 0, mz->len);

    /* cmd_ring initialization */
    ring->base = mz->addr;
    ring->basePA = mz->phys_addr;

    /* comp_ring initialization */
    comp_ring->base = ring->base + ring->size;
    comp_ring->basePA = ring->basePA +
        (sizeof(struct Vmxnet3_TxDesc) * ring->size);

    /* data_ring initialization */
    data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
    data_ring->basePA = comp_ring->basePA +
        (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
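    /*
     * Resulting memzone layout for a Tx queue (all three rings share one
     * physically contiguous, VMXNET3_RING_BA_ALIGN aligned allocation):
     *
     *	base:  [Vmxnet3_TxDesc     x ring->size     ]  command ring
     *	       [Vmxnet3_TxCompDesc x comp_ring->size]  completion ring
     *	       [txdata_desc_size   x data_ring->size]  data ring
     */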
    /* cmd_ring0 buf_info allocation */
    ring->buf_info = rte_zmalloc("tx_ring_buf_info",
                                 ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
    if (ring->buf_info == NULL) {
        PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");

    /* Update the data portion with txq */
    dev->data->tx_queues[queue_idx] = txq;

vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           unsigned int socket_id,
                           __rte_unused const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)

    const struct rte_memzone *mz;
    struct vmxnet3_rx_queue *rxq;
    struct vmxnet3_hw *hw = dev->data->dev_private;
    struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
    struct vmxnet3_comp_ring *comp_ring;
    struct vmxnet3_rx_data_ring *data_ring;

    PMD_INIT_FUNC_TRACE();

    rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
                      RTE_CACHE_LINE_SIZE);
        PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");

    rxq->queue_id = queue_idx;
    rxq->port_id = dev->data->port_id;
    rxq->shared = &hw->rqd_start[queue_idx];
    rxq->qid1 = queue_idx;
    rxq->qid2 = queue_idx + hw->num_rx_queues;
    rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
    rxq->data_desc_size = hw->rxdata_desc_size;
    rxq->stopped = TRUE;

    ring0 = &rxq->cmd_ring[0];
    ring1 = &rxq->cmd_ring[1];
    comp_ring = &rxq->comp_ring;
    data_ring = &rxq->data_ring;
    /* Rx vmxnet rings length should be between 256-4096 */
    if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
        PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u",
                     VMXNET3_DEF_RX_RING_SIZE);
    } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
        PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u",
                     VMXNET3_RX_RING_MAX_SIZE);

        ring0->size = nb_desc;
        ring0->size &= ~VMXNET3_RING_SIZE_MASK;
        ring1->size = ring0->size;

    comp_ring->size = ring0->size + ring1->size;
    data_ring->size = ring0->size;
    /* Rx vmxnet rings structure initialization */
    ring0->next2fill = 0;
    ring1->next2fill = 0;
    ring0->next2comp = 0;
    ring1->next2comp = 0;
    ring0->gen = VMXNET3_INIT_GEN;
    ring1->gen = VMXNET3_INIT_GEN;
    comp_ring->next2proc = 0;
    comp_ring->gen = VMXNET3_INIT_GEN;

    size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
    size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
    if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
        size += rxq->data_desc_size * data_ring->size;

    mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
                                  VMXNET3_RING_BA_ALIGN, socket_id);
        PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");

    memset(mz->addr, 0, mz->len);

    /* cmd_ring0 initialization */
    ring0->base = mz->addr;
    ring0->basePA = mz->phys_addr;

    /* cmd_ring1 initialization */
    ring1->base = ring0->base + ring0->size;
    ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;

    /* comp_ring initialization */
    comp_ring->base = ring1->base + ring1->size;
    comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *

    /* data_ring initialization */
    if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
            (uint8_t *)(comp_ring->base + comp_ring->size);
        data_ring->basePA = comp_ring->basePA +
            sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
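    /*
     * Resulting memzone layout for an Rx queue (one contiguous allocation):
     *
     *	base:  [Vmxnet3_RxDesc     x ring0->size    ]  command ring 0
     *	       [Vmxnet3_RxDesc     x ring1->size    ]  command ring 1
     *	       [Vmxnet3_RxCompDesc x comp_ring->size]  completion ring
     *	       [data_desc_size     x data_ring->size]  data ring (v3+ only)
     */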
    /* cmd_ring0-cmd_ring1 buf_info allocation */
    for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
        ring = &rxq->cmd_ring[i];

        snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);

        ring->buf_info = rte_zmalloc(mem_name,
                                     ring->size * sizeof(vmxnet3_buf_info_t),
                                     RTE_CACHE_LINE_SIZE);
        if (ring->buf_info == NULL) {
            PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");

    /* Update the data portion with rxq */
    dev->data->rx_queues[queue_idx] = rxq;

/*
 * Initializes Receive Unit
 * Load mbufs in rx queue in advance
 */
vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)

    struct vmxnet3_hw *hw = dev->data->dev_private;

    PMD_INIT_FUNC_TRACE();

    for (i = 0; i < hw->num_rx_queues; i++) {
        vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

        for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
            /* Post as many buffers as the ring can take */
            ret = vmxnet3_post_rx_bufs(rxq, j);
1156 "ERROR: Posting Rxq: %d buffers ring: %d",
1161 * Updating device with the index:next2fill to fill the
1162 * mbufs for coming packets.
1164 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1165 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
1166 rxq->cmd_ring[j].next2fill);
1169 rxq->stopped = FALSE;
1170 rxq->start_seg = NULL;
1173 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1174 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1176 txq->stopped = FALSE;
1182 static uint8_t rss_intel_key[40] = {
1183 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1184 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1185 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1186 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1187 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1191 * Configure RSS feature
1194 vmxnet3_rss_configure(struct rte_eth_dev *dev)
1196 struct vmxnet3_hw *hw = dev->data->dev_private;
1197 struct VMXNET3_RSSConf *dev_rss_conf;
1198 struct rte_eth_rss_conf *port_rss_conf;
1202 PMD_INIT_FUNC_TRACE();
1204 dev_rss_conf = hw->rss_conf;
1205 port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1207 /* loading hashFunc */
1208 dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
1209 /* loading hashKeySize */
1210 dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
1211 /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
1212 dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
1214 if (port_rss_conf->rss_key == NULL) {
1215 /* Default hash key */
1216 port_rss_conf->rss_key = rss_intel_key;
1219 /* loading hashKey */
1220 memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
1221 dev_rss_conf->hashKeySize);
1223 /* loading indTable */
1224 for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
1225 if (j == dev->data->nb_rx_queues)
1227 dev_rss_conf->indTable[i] = j;
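    /*
     * The indirection table is filled round-robin over the configured
     * Rx queues. For example, with 4 Rx queues indTableSize is 16 and
     * the table becomes 0,1,2,3,0,1,2,3,... so hash buckets are spread
     * evenly across the queues.
     */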
    /* loading hashType */
    dev_rss_conf->hashType = 0;
    rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
    if (rss_hf & ETH_RSS_IPV4)
        dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
    if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
        dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
    if (rss_hf & ETH_RSS_IPV6)
        dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
    if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
        dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;

    return VMXNET3_SUCCESS;