/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_net.h>

#include "base/vmxnet3_defs.h"
#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
#define VMXNET3_TX_OFFLOAD_MASK ( \
                PKT_TX_VLAN_PKT | \
                PKT_TX_L4_MASK | \
                PKT_TX_IP_CKSUM | \
                PKT_TX_TCP_SEG)

#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
        (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
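
/*
 * Illustrative sketch (not used by the driver): because the two masks above
 * partition PKT_TX_OFFLOAD_MASK, a single AND against the NOTSUP mask tells
 * whether an mbuf requests anything this device cannot do. The helper name
 * is an assumption of the example.
 */
static __rte_unused int
example_tx_offload_supported(uint64_t ol_flags)
{
        /* Zero means every requested Tx offload is in the supported set. */
        return (ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) == 0;
}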
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};

static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *, uint8_t);
static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
#endif
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
static void
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
        uint32_t avail = 0;

        PMD_RX_LOG(DEBUG,
                   "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
                   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
        PMD_RX_LOG(DEBUG,
                   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
                   (unsigned long)rxq->cmd_ring[0].basePA,
                   (unsigned long)rxq->cmd_ring[1].basePA,
                   (unsigned long)rxq->comp_ring.basePA);

        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
        PMD_RX_LOG(DEBUG,
                   "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[0].size, avail,
                   rxq->comp_ring.next2proc,
                   rxq->cmd_ring[0].size - avail);

        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
        PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
                   rxq->cmd_ring[1].size - avail);
}
static void
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
        uint32_t avail = 0;

        PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
                   txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
        PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
                   (unsigned long)txq->cmd_ring.basePA,
                   (unsigned long)txq->comp_ring.basePA,
                   (unsigned long)txq->data_ring.basePA);

        avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
        PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
                   (uint32_t)txq->cmd_ring.size, avail,
                   txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
#endif
static void
vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
        while (ring->next2comp != ring->next2fill) {
                /* No need to worry about desc ownership, device is quiesced by now. */
                vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;

                if (buf_info->m) {
                        rte_pktmbuf_free(buf_info->m);
                        buf_info->m = NULL;
                        buf_info->bufPA = 0;
                        buf_info->len = 0;
                }
                vmxnet3_cmd_ring_adv_next2comp(ring);
        }
}
static void
vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
{
        uint32_t i;

        for (i = 0; i < ring->size; i++) {
                /* No need to worry about desc ownership, device is quiesced by now. */
                vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];

                if (buf_info->m) {
                        rte_pktmbuf_free_seg(buf_info->m);
                        buf_info->m = NULL;
                        buf_info->bufPA = 0;
                        buf_info->len = 0;
                }
                vmxnet3_cmd_ring_adv_next2comp(ring);
        }
}
static void
vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
{
        rte_free(ring->buf_info);
        ring->buf_info = NULL;
}
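
/*
 * Sketch of a ring helper assumed from vmxnet3_ring.h (illustrative, not the
 * authoritative definition): each cmd ring keeps producer/consumer indices,
 * and one slot is always left unused so that full and empty states stay
 * distinguishable.
 */
static __rte_unused uint32_t
example_cmd_ring_desc_avail(const struct vmxnet3_cmd_ring *ring)
{
        /* Free slots between producer (next2fill) and consumer (next2comp),
         * minus the one slot deliberately kept unused. */
        return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
                ring->next2comp - ring->next2fill - 1;
}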
void
vmxnet3_dev_tx_queue_release(void *txq)
{
        vmxnet3_tx_queue_t *tq = txq;

        if (tq != NULL) {
                /* Release mbufs */
                vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
                /* Release the cmd_ring */
                vmxnet3_cmd_ring_release(&tq->cmd_ring);
                /* Release the memzone */
                rte_memzone_free(tq->mz);
                /* Release the queue */
                rte_free(tq);
        }
}
void
vmxnet3_dev_rx_queue_release(void *rxq)
{
        int i;
        vmxnet3_rx_queue_t *rq = rxq;

        if (rq != NULL) {
                /* Release mbufs */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

                /* Release both the cmd_rings */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);

                /* Release the memzone */
                rte_memzone_free(rq->mz);

                /* Release the queue */
                rte_free(rq);
        }
}
static void
vmxnet3_dev_tx_queue_reset(void *txq)
{
        vmxnet3_tx_queue_t *tq = txq;
        struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
        struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
        struct vmxnet3_data_ring *data_ring = &tq->data_ring;
        int size;

        if (tq != NULL) {
                /* Release the cmd_ring mbufs */
                vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
        }

        /* Tx vmxnet rings structure initialization */
        ring->next2fill = 0;
        ring->next2comp = 0;
        ring->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += tq->txdata_desc_size * data_ring->size;

        memset(ring->base, 0, size);
}
static void
vmxnet3_dev_rx_queue_reset(void *rxq)
{
        int i;
        vmxnet3_rx_queue_t *rq = rxq;
        struct vmxnet3_hw *hw = rq->hw;
        struct vmxnet3_cmd_ring *ring0, *ring1;
        struct vmxnet3_comp_ring *comp_ring;
        struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
        int size;

        /* Release both the cmd_rings mbufs */
        for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);

        ring0 = &rq->cmd_ring[0];
        ring1 = &rq->cmd_ring[1];
        comp_ring = &rq->comp_ring;

        /* Rx vmxnet rings structure initialization */
        ring0->next2fill = 0;
        ring1->next2fill = 0;
        ring0->next2comp = 0;
        ring1->next2comp = 0;
        ring0->gen = VMXNET3_INIT_GEN;
        ring1->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
        size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
        if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
                size += rq->data_desc_size * data_ring->size;

        memset(ring0->base, 0, size);
}
void
vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
{
        unsigned int i;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

                if (txq != NULL) {
                        txq->stopped = TRUE;
                        vmxnet3_dev_tx_queue_reset(txq);
                }
        }

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

                if (rxq != NULL) {
                        rxq->stopped = TRUE;
                        vmxnet3_dev_rx_queue_reset(rxq);
                }
        }
}
static int
vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
{
        int completed = 0;
        struct rte_mbuf *mbuf;

        /* Release cmd_ring descriptor and free mbuf */
        RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);

        mbuf = txq->cmd_ring.buf_info[eop_idx].m;
        if (mbuf == NULL)
                rte_panic("EOP desc does not point to a valid mbuf");
        rte_pktmbuf_free(mbuf);

        txq->cmd_ring.buf_info[eop_idx].m = NULL;

        while (txq->cmd_ring.next2comp != eop_idx) {
                /* no out-of-order completion */
                RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
                vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
                completed++;
        }

        /* Mark the txd for which tcd was generated as completed */
        vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);

        return completed + 1;
}
static void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
        int completed = 0;
        vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
        struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
                (comp_ring->base + comp_ring->next2proc);

        while (tcd->gen == comp_ring->gen) {
                completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);

                vmxnet3_comp_ring_adv_next2proc(comp_ring);
                tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
                                                    comp_ring->next2proc);
        }

        PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
}
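
/*
 * Sketch (illustrative) of what vmxnet3_comp_ring_adv_next2proc() from
 * vmxnet3_ring.h is assumed to do for the loop above: advance the consumer
 * index and flip the expected generation on wraparound, so completion
 * entries written during the device's previous lap never match tcd->gen.
 */
static __rte_unused void
example_comp_ring_adv_next2proc(vmxnet3_comp_ring_t *ring)
{
        if (++ring->next2proc == ring->size) {
                ring->next2proc = 0;
                ring->gen = (uint8_t)(ring->gen ^ 1); /* wrap: flip gen */
        }
}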
uint16_t
vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
{
        int32_t ret;
        uint32_t i;
        uint64_t ol_flags;
        struct rte_mbuf *m;

        for (i = 0; i != nb_pkts; i++) {
                m = tx_pkts[i];
                ol_flags = m->ol_flags;

                /* Non-TSO packet cannot occupy more than
                 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
                 */
                if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
                    m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
                        rte_errno = EINVAL;
                        return i;
                }

                /* check that only supported TX offloads are requested. */
                if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
                    (ol_flags & PKT_TX_L4_MASK) ==
                    PKT_TX_SCTP_CKSUM) {
                        rte_errno = ENOTSUP;
                        return i;
                }

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
                ret = rte_validate_tx_offload(m);
                if (ret != 0) {
                        rte_errno = -ret;
                        return i;
                }
#endif
                ret = rte_net_intel_cksum_prepare(m);
                if (ret != 0) {
                        rte_errno = -ret;
                        return i;
                }
        }

        return i;
}
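
/*
 * Illustrative application-side sketch (not driver code): how a sender is
 * expected to pair tx_prepare with tx_burst so that vmxnet3_prep_pkts()
 * above can reject unsupported offload requests up front. "port_id",
 * "queue_id" and "pkts" are assumptions of the example.
 */
static __rte_unused uint16_t
example_tx_prepare_and_burst(uint16_t port_id, uint16_t queue_id,
                             struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        /* Stops at the first packet the PMD cannot handle; rte_errno says why. */
        uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

        if (nb_prep < nb_pkts)
                PMD_TX_LOG(DEBUG, "tx_prepare stopped at pkt %u (rte_errno %d)",
                           nb_prep, rte_errno);

        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}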
uint16_t
vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                  uint16_t nb_pkts)
{
        uint16_t nb_tx;
        vmxnet3_tx_queue_t *txq = tx_queue;
        struct vmxnet3_hw *hw = txq->hw;
        Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
        uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);

        if (unlikely(txq->stopped)) {
                PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
                return 0;
        }

        /* Free up the comp_descriptors aggressively */
        vmxnet3_tq_tx_complete(txq);

        nb_tx = 0;
        while (nb_tx < nb_pkts) {
                Vmxnet3_GenericDesc *gdesc;
                vmxnet3_buf_info_t *tbi;
                uint32_t first2fill, avail, dw2;
                struct rte_mbuf *txm = tx_pkts[nb_tx];
                struct rte_mbuf *m_seg = txm;
                int copy_size = 0;
                bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
                /* # of descriptors needed for a packet. */
                unsigned int count = txm->nb_segs;

                avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
                if (count > avail) {
                        /* Is command ring full? */
                        if (unlikely(avail == 0)) {
                                PMD_TX_LOG(DEBUG, "No free ring descriptors");
                                txq->stats.tx_ring_full++;
                                txq->stats.drop_total += (nb_pkts - nb_tx);
                                break;
                        }

                        /* Command ring is not full but cannot handle the
                         * multi-segmented packet. Let's try the next packet
                         * in this case.
                         */
                        PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
                                   "(avail %d needed %d)", avail, count);
                        txq->stats.drop_total++;
                        if (tso)
                                txq->stats.drop_tso++;
                        rte_pktmbuf_free(txm);
                        nb_tx++;
                        continue;
                }

                /* Drop non-TSO packet that is excessively fragmented */
                if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
                        PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
                                   "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
                        txq->stats.drop_too_many_segs++;
                        txq->stats.drop_total++;
                        rte_pktmbuf_free(txm);
                        nb_tx++;
                        continue;
                }

                if (txm->nb_segs == 1 &&
                    rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
                        struct Vmxnet3_TxDataDesc *tdd;

                        tdd = (struct Vmxnet3_TxDataDesc *)
                                ((uint8 *)txq->data_ring.base +
                                 txq->cmd_ring.next2fill *
                                 txq->txdata_desc_size);
                        copy_size = rte_pktmbuf_pkt_len(txm);
                        rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
                }

                /* use the previous gen bit for the SOP desc */
                dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
                first2fill = txq->cmd_ring.next2fill;
                do {
                        /* Remember the transmit buffer for cleanup */
                        tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;

                        /* NB: the following assumes that VMXNET3 maximum
                         * transmit buffer size (16K) is greater than
                         * maximum size of mbuf segment size.
                         */
                        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
                        if (copy_size) {
                                uint64 offset =
                                        (uint64)txq->cmd_ring.next2fill *
                                                txq->txdata_desc_size;
                                gdesc->txd.addr =
                                        rte_cpu_to_le_64(txq->data_ring.basePA +
                                                         offset);
                        } else {
                                gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
                        }

                        gdesc->dword[2] = dw2 | m_seg->data_len;
                        gdesc->dword[3] = 0;

                        /* move to the next2fill descriptor */
                        vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);

                        /* use the right gen for non-SOP desc */
                        dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
                } while ((m_seg = m_seg->next) != NULL);

                /* set the last buf_info for the pkt */
                tbi->m = txm;
                /* Update the EOP descriptor */
                gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;

                /* Add VLAN tag if present */
                gdesc = txq->cmd_ring.base + first2fill;
                if (txm->ol_flags & PKT_TX_VLAN_PKT) {
                        gdesc->txd.ti = 1;
                        gdesc->txd.tci = txm->vlan_tci;
                }

                if (tso) {
                        uint16_t mss = txm->tso_segsz;

                        RTE_ASSERT(mss > 0);

                        gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
                        gdesc->txd.om = VMXNET3_OM_TSO;
                        gdesc->txd.msscof = mss;

                        deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
                } else if (txm->ol_flags & PKT_TX_L4_MASK) {
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.hlen = txm->l2_len + txm->l3_len;

                        switch (txm->ol_flags & PKT_TX_L4_MASK) {
                        case PKT_TX_TCP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
                                break;
                        case PKT_TX_UDP_CKSUM:
                                gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
                                break;
                        default:
                                PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
                                           (unsigned long long)(txm->ol_flags & PKT_TX_L4_MASK));
                                abort();
                        }
                        deferred++;
                } else {
                        gdesc->txd.hlen = 0;
                        gdesc->txd.om = VMXNET3_OM_NONE;
                        gdesc->txd.msscof = 0;
                        deferred++;
                }

                /* flip the GEN bit on the SOP */
                rte_compiler_barrier();
                gdesc->dword[2] ^= VMXNET3_TXD_GEN;

                txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
                nb_tx++;
        }

        PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));

        if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
                txq_ctrl->txNumDeferred = 0;
                /* Notify vSwitch that packets are available. */
                VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
                                       txq->cmd_ring.next2fill);
        }

        return nb_tx;
}
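
/*
 * Worked example for the TSO accounting above (illustrative): deferred is
 * the host's estimate of how many descriptors the device will complete.
 * For a 9014-byte TSO packet with 54 bytes of headers and mss = 1460,
 * (9014 - 54 + 1460 - 1) / 1460 = 7, i.e. the ceiling of 8960/1460, one per
 * wire segment; a non-TSO packet always counts as 1. Once deferred crosses
 * txThreshold, the write to VMXNET3_REG_TXPROD rings the doorbell, batching
 * notifications to the hypervisor instead of paying one exit per packet.
 */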
static inline void
vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
                   struct rte_mbuf *mbuf)
{
        uint32_t val;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
        struct Vmxnet3_RxDesc *rxd =
                (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
        vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];

        if (ring_id == 0) {
                /* Usually: One HEAD type buf per packet
                 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
                 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
                 */

                /* We use single packet buffer so all heads here */
                val = VMXNET3_RXD_BTYPE_HEAD;
        } else {
                /* All BODY type buffers for 2nd ring */
                val = VMXNET3_RXD_BTYPE_BODY;
        }

        /*
         * Load mbuf pointer into buf_info[ring_size]
         * buf_info structure is equivalent to cookie for virtio-virtqueue
         */
        buf_info->m = mbuf;
        buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
        buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);

        /* Load Rx Descriptor with the buffer's GPA */
        rxd->addr = buf_info->bufPA;

        /* After this point rxd->addr MUST not be NULL */
        rxd->btype = val;
        rxd->len = buf_info->len;
        /* Flip gen bit at the end to change ownership */
        rxd->gen = ring->gen;

        vmxnet3_cmd_ring_adv_next2fill(ring);
}
/*
 * Allocates mbufs and clusters. Post rx descriptors with buffer details
 * so that device can receive packets in those buffers.
 *
 * Among the two rings, 1st ring contains buffers of type 0 and type 1.
 * bufs_per_pkt is set such that for non-LRO cases all the buffers required
 * by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
 * 2nd ring contains buffers of type 1 alone; it is used mostly for LRO.
 */
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
        int err = 0;
        uint32_t i = 0;
        struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];

        while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
                struct rte_mbuf *mbuf;

                /* Allocate blank mbuf for the current Rx Descriptor */
                mbuf = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(mbuf == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf");
                        rxq->stats.rx_buf_alloc_failure++;
                        err = ENOMEM;
                        break;
                }

                vmxnet3_renew_desc(rxq, ring_id, mbuf);
                i++;
        }

        /* Return error only if no buffers are posted at present */
        if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
                return -err;
        else
                return i;
}
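
/*
 * Worked example (illustrative): with ring->size == 256, desc_avail() can
 * reach 255 (one slot is always kept unused) only when not a single buffer
 * is owned by the device, so the caller gets -err; if at least one
 * descriptor was posted, the count of newly posted buffers is returned.
 */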
/* Receive side checksum and other offloads */
static void
vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
{
        /* Check for RSS */
        if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
                rxm->ol_flags |= PKT_RX_RSS_HASH;
                rxm->hash.rss = rcd->rssHash;
        }

        /* Check packet type, checksum errors, etc. Only support IPv4 for now. */
        if (rcd->v4) {
                struct ether_hdr *eth = rte_pktmbuf_mtod(rxm, struct ether_hdr *);
                struct ipv4_hdr *ip = (struct ipv4_hdr *)(eth + 1);

                if (((ip->version_ihl & 0xf) << 2) > (int)sizeof(struct ipv4_hdr))
                        rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT;
                else
                        rxm->packet_type = RTE_PTYPE_L3_IPV4;

                if (!rcd->cnc) {
                        if (!rcd->ipc)
                                rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;

                        if ((rcd->tcp || rcd->udp) && !rcd->tuc)
                                rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
                }
        }
}
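
/*
 * Illustrative application-side sketch (not driver code): consuming the
 * offload flags set above after a receive burst. The helper name is an
 * assumption of the example.
 */
static __rte_unused int
example_rx_cksum_ok(const struct rte_mbuf *m)
{
        /* The device validated IP/L4 checksums; bad ones are flagged here. */
        return (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) == 0;
}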
/*
 * Process the Rx Completion Ring of given vmxnet3_rx_queue
 * for nb_pkts burst and return the number of packets received
 */
uint16_t
vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        uint16_t nb_rx;
        uint32_t nb_rxd, idx;
        uint8_t ring_idx;
        vmxnet3_rx_queue_t *rxq;
        Vmxnet3_RxCompDesc *rcd;
        vmxnet3_buf_info_t *rbi;
        Vmxnet3_RxDesc *rxd;
        struct rte_mbuf *rxm = NULL;
        struct vmxnet3_hw *hw;

        nb_rx = 0;
        ring_idx = 0;
        nb_rxd = 0;
        idx = 0;

        rxq = rx_queue;
        hw = rxq->hw;

        rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;

        if (unlikely(rxq->stopped)) {
                PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
                return 0;
        }

        while (rcd->gen == rxq->comp_ring.gen) {
                struct rte_mbuf *newm;

                if (nb_rx >= nb_pkts)
                        break;

                newm = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(newm == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf");
                        rxq->stats.rx_buf_alloc_failure++;
                        break;
                }

                idx = rcd->rxdIdx;
                ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
                rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
                RTE_SET_USED(rxd); /* used only for assert when enabled */
                rbi = rxq->cmd_ring[ring_idx].buf_info + idx;

                PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);

                RTE_ASSERT(rcd->len <= rxd->len);
                RTE_ASSERT(rbi->m);

                /* Get the packet buffer pointer from buf_info */
                rxm = rbi->m;

                /* Clear descriptor associated buf_info to be reused */
                rbi->m = NULL;
                rbi->bufPA = 0;

                /* Update the index that we received a packet */
                rxq->cmd_ring[ring_idx].next2comp = idx;

                /* For RCD with EOP set, check if there is frame error */
                if (unlikely(rcd->eop && rcd->err)) {
                        rxq->stats.drop_total++;
                        rxq->stats.drop_err++;

                        if (!rcd->fcs) {
                                rxq->stats.drop_fcs++;
                                PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
                        }
                        PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
                                   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                                         rxq->comp_ring.base), rcd->rxdIdx);
                        rte_pktmbuf_free_seg(rxm);
                        if (rxq->start_seg) {
                                struct rte_mbuf *start = rxq->start_seg;

                                rxq->start_seg = NULL;
                                rte_pktmbuf_free(start);
                        }
                        goto rcd_done;
                }

                /* Initialize newly received packet buffer */
                rxm->port = rxq->port_id;
                rxm->nb_segs = 1;
                rxm->next = NULL;
                rxm->pkt_len = (uint16_t)rcd->len;
                rxm->data_len = (uint16_t)rcd->len;
                rxm->data_off = RTE_PKTMBUF_HEADROOM;
                rxm->ol_flags = 0;
                rxm->vlan_tci = 0;

                /*
                 * If this is the first buffer of the received packet,
                 * set the pointer to the first mbuf of the packet
                 * Otherwise, update the total length and the number of segments
                 * of the current scattered packet, and update the pointer to
                 * the last mbuf of the current packet.
                 */
                if (rcd->sop) {
                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);

                        if (unlikely(rcd->len == 0)) {
                                RTE_ASSERT(rcd->eop);

                                PMD_RX_LOG(DEBUG,
                                           "Rx buf was skipped. rxring[%d][%d])",
                                           ring_idx, idx);
                                rte_pktmbuf_free_seg(rxm);
                                goto rcd_done;
                        }

                        if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
                                uint8_t *rdd = rxq->data_ring.base +
                                        idx * rxq->data_desc_size;

                                RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
                                rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
                                           rdd, rcd->len);
                        }

                        rxq->start_seg = rxm;
                        vmxnet3_rx_offload(rcd, rxm);
                } else {
                        struct rte_mbuf *start = rxq->start_seg;

                        RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
                        RTE_ASSERT(start != NULL);

                        start->pkt_len += rxm->data_len;
                        start->nb_segs++;

                        rxq->last_seg->next = rxm;
                }
                rxq->last_seg = rxm;

                if (rcd->eop) {
                        struct rte_mbuf *start = rxq->start_seg;

                        /* Check for hardware stripped VLAN tag */
                        if (rcd->ts) {
                                start->ol_flags |= (PKT_RX_VLAN |
                                                    PKT_RX_VLAN_STRIPPED);
                                start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
                        }

                        rx_pkts[nb_rx++] = start;
                        rxq->start_seg = NULL;
                }

rcd_done:
                rxq->cmd_ring[ring_idx].next2comp = idx;
                VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
                                          rxq->cmd_ring[ring_idx].size);

                /* It's time to renew descriptors */
                vmxnet3_renew_desc(rxq, ring_idx, newm);
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);
                }

                /* Advance to the next descriptor in comp_ring */
                vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);

                rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
                nb_rxd++;
                if (nb_rxd > rxq->cmd_ring[0].size) {
                        PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
                                   " relinquish control.");
                        break;
                }
        }

        if (unlikely(nb_rxd == 0)) {
                uint32_t avail;

                for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
                        avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
                        if (unlikely(avail > 0)) {
                                /* try to alloc new buf and renew descriptors */
                                vmxnet3_post_rx_bufs(rxq, ring_idx);
                        }
                }
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
                                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                                       rxq->cmd_ring[ring_idx].next2fill);
                        }
                }
        }

        return nb_rx;
}
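
/*
 * Illustrative application-side sketch (not driver code): a typical poll
 * loop over vmxnet3_recv_pkts() via the ethdev API. "port_id" and
 * "queue_id" are assumptions of the example.
 */
static __rte_unused void
example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *pkts[32];
        uint16_t nb, i;

        nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
        for (i = 0; i < nb; i++)
                rte_pktmbuf_free(pkts[i]); /* a real app would process the packet first */
}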
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           const struct rte_eth_txconf *tx_conf)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
        struct vmxnet3_tx_queue *txq;
        struct vmxnet3_cmd_ring *ring;
        struct vmxnet3_comp_ring *comp_ring;
        struct vmxnet3_data_ring *data_ring;
        int size;

        PMD_INIT_FUNC_TRACE();

        if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
            ETH_TXQ_FLAGS_NOXSUMSCTP) {
                PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
                return -EINVAL;
        }

        txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
                return -ENOMEM;
        }

        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
        txq->shared = &hw->tqd_start[queue_idx];
        txq->hw = hw;
        txq->qid = queue_idx;
        txq->stopped = TRUE;
        txq->txdata_desc_size = hw->txdata_desc_size;

        ring = &txq->cmd_ring;
        comp_ring = &txq->comp_ring;
        data_ring = &txq->data_ring;

        /* Tx vmxnet ring length should be between 512-4096 */
        if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
                             VMXNET3_DEF_TX_RING_SIZE);
                return -EINVAL;
        } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
                             VMXNET3_TX_RING_MAX_SIZE);
                return -EINVAL;
        } else {
                ring->size = nb_desc;
                ring->size &= ~VMXNET3_RING_SIZE_MASK;
        }
        comp_ring->size = data_ring->size = ring->size;

        /* Tx vmxnet rings structure initialization */
        ring->next2fill = 0;
        ring->next2comp = 0;
        ring->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += txq->txdata_desc_size * data_ring->size;

        mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
                                      VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        txq->mz = mz;
        memset(mz->addr, 0, mz->len);

        /* cmd_ring initialization */
        ring->base = mz->addr;
        ring->basePA = mz->iova;

        /* comp_ring initialization */
        comp_ring->base = ring->base + ring->size;
        comp_ring->basePA = ring->basePA +
                (sizeof(struct Vmxnet3_TxDesc) * ring->size);

        /* data_ring initialization */
        data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
        data_ring->basePA = comp_ring->basePA +
                (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);

        /* cmd_ring buf_info allocation */
        ring->buf_info = rte_zmalloc("tx_ring_buf_info",
                                     ring->size * sizeof(vmxnet3_buf_info_t),
                                     RTE_CACHE_LINE_SIZE);
        if (ring->buf_info == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
                return -ENOMEM;
        }

        /* Update the data portion with txq */
        dev->data->tx_queues[queue_idx] = txq;

        return 0;
}
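
/*
 * Worked example (illustrative): ring->size &= ~VMXNET3_RING_SIZE_MASK above
 * rounds the requested descriptor count down to the ring alignment. Assuming
 * VMXNET3_RING_SIZE_MASK == VMXNET3_RING_SIZE_ALIGN - 1 with an alignment of
 * 32 (per base/vmxnet3_defs.h), a request of nb_desc = 1000 becomes
 * 1000 & ~31 = 992 descriptors.
 */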
int
vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           __rte_unused const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)
{
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
        struct vmxnet3_hw *hw = dev->data->dev_private;
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        struct vmxnet3_rx_data_ring *data_ring;
        int size;
        uint8_t i;
        char mem_name[32];

        PMD_INIT_FUNC_TRACE();

        rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
                          RTE_CACHE_LINE_SIZE);
        if (rxq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
                return -ENOMEM;
        }

        rxq->mp = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        rxq->shared = &hw->rqd_start[queue_idx];
        rxq->hw = hw;
        rxq->qid1 = queue_idx;
        rxq->qid2 = queue_idx + hw->num_rx_queues;
        rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
        rxq->data_desc_size = hw->rxdata_desc_size;
        rxq->stopped = TRUE;

        ring0 = &rxq->cmd_ring[0];
        ring1 = &rxq->cmd_ring[1];
        comp_ring = &rxq->comp_ring;
        data_ring = &rxq->data_ring;

        /* Rx vmxnet rings length should be between 256-4096 */
        if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u",
                             VMXNET3_DEF_RX_RING_SIZE);
                return -EINVAL;
        } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
                PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u",
                             VMXNET3_RX_RING_MAX_SIZE);
                return -EINVAL;
        } else {
                ring0->size = nb_desc;
                ring0->size &= ~VMXNET3_RING_SIZE_MASK;
                ring1->size = ring0->size;
        }

        comp_ring->size = ring0->size + ring1->size;
        data_ring->size = ring0->size;

        /* Rx vmxnet rings structure initialization */
        ring0->next2fill = 0;
        ring1->next2fill = 0;
        ring0->next2comp = 0;
        ring1->next2comp = 0;
        ring0->gen = VMXNET3_INIT_GEN;
        ring1->gen = VMXNET3_INIT_GEN;
        comp_ring->next2proc = 0;
        comp_ring->gen = VMXNET3_INIT_GEN;

        size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
        size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
        if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
                size += rxq->data_desc_size * data_ring->size;

        mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
                                      VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
        rxq->mz = mz;
        memset(mz->addr, 0, mz->len);

        /* cmd_ring0 initialization */
        ring0->base = mz->addr;
        ring0->basePA = mz->iova;

        /* cmd_ring1 initialization */
        ring1->base = ring0->base + ring0->size;
        ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;

        /* comp_ring initialization */
        comp_ring->base = ring1->base + ring1->size;
        comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
                ring1->size;

        /* data_ring initialization */
        if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
                data_ring->base =
                        (uint8_t *)(comp_ring->base + comp_ring->size);
                data_ring->basePA = comp_ring->basePA +
                        sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
        }

        /* cmd_ring0-cmd_ring1 buf_info allocation */
        for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
                ring = &rxq->cmd_ring[i];
                ring->rid = i;
                snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);

                ring->buf_info = rte_zmalloc(mem_name,
                                             ring->size * sizeof(vmxnet3_buf_info_t),
                                             RTE_CACHE_LINE_SIZE);
                if (ring->buf_info == NULL) {
                        PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
                        return -ENOMEM;
                }
        }

        /* Update the data portion with rxq */
        dev->data->rx_queues[queue_idx] = rxq;

        return 0;
}
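
/*
 * Layout note (illustrative): the single memzone reserved above is carved up
 * back to back, so the device-visible physical addresses follow directly
 * from mz->iova:
 *
 *   [ring0 RxDesc x ring0->size][ring1 RxDesc x ring1->size]
 *   [RxCompDesc x comp_ring->size][optional v3 Rx data ring]
 */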
/*
 * Initializes Receive Unit
 * Load mbufs in rx queue in advance
 */
int
vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;

        int i, ret;
        uint8_t j;

        PMD_INIT_FUNC_TRACE();

        for (i = 0; i < hw->num_rx_queues; i++) {
                vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

                for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
                        /* Post buffers until the ring is full */
                        ret = vmxnet3_post_rx_bufs(rxq, j);
                        if (ret <= 0) {
                                PMD_INIT_LOG(ERR,
                                             "ERROR: Posting Rxq: %d buffers ring: %d",
                                             i, j);
                                return -ret;
                        }
                        /*
                         * Updating device with the index:next2fill to fill the
                         * mbufs for coming packets.
                         */
                        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                                       rxq->cmd_ring[j].next2fill);
                        }
                }
                rxq->stopped = FALSE;
                rxq->start_seg = NULL;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

                txq->stopped = FALSE;
        }

        return 0;
}
static uint8_t rss_intel_key[40] = {
        0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
        0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
        0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
        0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
        0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};

/*
 * Configure RSS feature
 */
int
vmxnet3_rss_configure(struct rte_eth_dev *dev)
{
        struct vmxnet3_hw *hw = dev->data->dev_private;
        struct VMXNET3_RSSConf *dev_rss_conf;
        struct rte_eth_rss_conf *port_rss_conf;
        uint64_t rss_hf;
        uint8_t i, j;

        PMD_INIT_FUNC_TRACE();

        dev_rss_conf = hw->rss_conf;
        port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;

        /* loading hashFunc */
        dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
        /* loading hashKeySize */
        dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
        /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
        dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);

        if (port_rss_conf->rss_key == NULL) {
                /* Default hash key */
                port_rss_conf->rss_key = rss_intel_key;
        }

        /* loading hashKey */
        memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
               dev_rss_conf->hashKeySize);

        /* loading indTable */
        for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
                if (j == dev->data->nb_rx_queues)
                        j = 0;
                dev_rss_conf->indTable[i] = j;
        }

        /* loading hashType */
        dev_rss_conf->hashType = 0;
        rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
        if (rss_hf & ETH_RSS_IPV4)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
        if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
        if (rss_hf & ETH_RSS_IPV6)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
        if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
                dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;

        return VMXNET3_SUCCESS;
}
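
/*
 * Worked example (illustrative): with 4 Rx queues, indTableSize is 16 and
 * the round-robin fill above produces indTable = {0, 1, 2, 3, 0, 1, 2, 3,
 * ...}, so RSS hash values spread packets evenly across the queues.
 */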