1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
34 #include <rte_ether.h>
35 #include <rte_ethdev_driver.h>
36 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
45 #include "base/vmxnet3_defs.h"
46 #include "vmxnet3_ring.h"
48 #include "vmxnet3_logs.h"
49 #include "vmxnet3_ethdev.h"
51 #define VMXNET3_TX_OFFLOAD_MASK ( \
56 #define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
57 (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
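/*
 * Illustrative sketch (not part of the driver): VMXNET3_TX_OFFLOAD_NOTSUP_MASK
 * is simply every PKT_TX_* request bit this PMD does not advertise, so a
 * packet's offload request can be screened with a single AND, which is what
 * vmxnet3_prep_pkts() does below.  The helper name is hypothetical.
 */
static inline int
vmxnet3_tx_offload_is_supported(uint64_t ol_flags)
{
        /* non-zero when every requested TX offload bit is one the PMD handles */
        return (ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) == 0;
}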
59 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
61 static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
62 static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
63 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
64 static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
65 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
68 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
70 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
78 "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
79 rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
81 "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
82 (unsigned long)rxq->cmd_ring[0].basePA,
83 (unsigned long)rxq->cmd_ring[1].basePA,
84 (unsigned long)rxq->comp_ring.basePA);
86 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
88 "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
89 (uint32_t)rxq->cmd_ring[0].size, avail,
90 rxq->comp_ring.next2proc,
91 rxq->cmd_ring[0].size - avail);
93 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
94 PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u",
95 (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
96 rxq->cmd_ring[1].size - avail);
101 vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
108 PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
109 txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
110 PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
111 (unsigned long)txq->cmd_ring.basePA,
112 (unsigned long)txq->comp_ring.basePA,
113 (unsigned long)txq->data_ring.basePA);
115 avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
116 PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
117 (uint32_t)txq->cmd_ring.size, avail,
118 txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
123 vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
125 while (ring->next2comp != ring->next2fill) {
126 /* No need to worry about desc ownership, device is quiesced by now. */
127 vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
130 rte_pktmbuf_free(buf_info->m);
135 vmxnet3_cmd_ring_adv_next2comp(ring);
140 vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
144 for (i = 0; i < ring->size; i++) {
145 /* No need to worry about desc ownership, device is quiesced by now. */
146 vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
149 rte_pktmbuf_free_seg(buf_info->m);
154 vmxnet3_cmd_ring_adv_next2comp(ring);
159 vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
161 rte_free(ring->buf_info);
162 ring->buf_info = NULL;
166 vmxnet3_dev_tx_queue_release(void *txq)
168 vmxnet3_tx_queue_t *tq = txq;
172 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
173 /* Release the cmd_ring */
174 vmxnet3_cmd_ring_release(&tq->cmd_ring);
175 /* Release the memzone */
176 rte_memzone_free(tq->mz);
177 /* Release the queue */
183 vmxnet3_dev_rx_queue_release(void *rxq)
186 vmxnet3_rx_queue_t *rq = rxq;
190 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
191 vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
193 /* Release both the cmd_rings */
194 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
195 vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
197 /* Release the memzone */
198 rte_memzone_free(rq->mz);
200 /* Release the queue */
206 vmxnet3_dev_tx_queue_reset(void *txq)
208 vmxnet3_tx_queue_t *tq = txq;
209 struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
210 struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
211 struct vmxnet3_data_ring *data_ring = &tq->data_ring;
215 /* Release the cmd_ring mbufs */
216 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
219 /* Tx vmxnet rings structure initialization */
222 ring->gen = VMXNET3_INIT_GEN;
223 comp_ring->next2proc = 0;
224 comp_ring->gen = VMXNET3_INIT_GEN;
226 size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
227 size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
228 size += tq->txdata_desc_size * data_ring->size;
230 memset(ring->base, 0, size);
234 vmxnet3_dev_rx_queue_reset(void *rxq)
237 vmxnet3_rx_queue_t *rq = rxq;
238 struct vmxnet3_hw *hw = rq->hw;
239 struct vmxnet3_cmd_ring *ring0, *ring1;
240 struct vmxnet3_comp_ring *comp_ring;
241 struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
244 /* Release both the cmd_rings mbufs */
245 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
246 vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
248 ring0 = &rq->cmd_ring[0];
249 ring1 = &rq->cmd_ring[1];
250 comp_ring = &rq->comp_ring;
252 /* Rx vmxnet rings structure initialization */
253 ring0->next2fill = 0;
254 ring1->next2fill = 0;
255 ring0->next2comp = 0;
256 ring1->next2comp = 0;
257 ring0->gen = VMXNET3_INIT_GEN;
258 ring1->gen = VMXNET3_INIT_GEN;
259 comp_ring->next2proc = 0;
260 comp_ring->gen = VMXNET3_INIT_GEN;
262 size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
263 size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
264 if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
265 size += rq->data_desc_size * data_ring->size;
267 memset(ring0->base, 0, size);
271 vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
275 PMD_INIT_FUNC_TRACE();
277 for (i = 0; i < dev->data->nb_tx_queues; i++) {
278 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
282 vmxnet3_dev_tx_queue_reset(txq);
286 for (i = 0; i < dev->data->nb_rx_queues; i++) {
287 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
291 vmxnet3_dev_rx_queue_reset(rxq);
297 vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
300 struct rte_mbuf *mbuf;
302 /* Release cmd_ring descriptor and free mbuf */
303 RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
305 mbuf = txq->cmd_ring.buf_info[eop_idx].m;
307 rte_panic("EOP desc does not point to a valid mbuf");
308 rte_pktmbuf_free(mbuf);
310 txq->cmd_ring.buf_info[eop_idx].m = NULL;
312 while (txq->cmd_ring.next2comp != eop_idx) {
313 /* no out-of-order completion */
314 RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
315 vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
319 /* Mark the txd for which tcd was generated as completed */
320 vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
322 return completed + 1;
326 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
329 vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
330 struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
331 (comp_ring->base + comp_ring->next2proc);
333 while (tcd->gen == comp_ring->gen) {
334 completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);
336 vmxnet3_comp_ring_adv_next2proc(comp_ring);
337 tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
338 comp_ring->next2proc);
341 PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
345 vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
353 for (i = 0; i != nb_pkts; i++) {
355 ol_flags = m->ol_flags;
357 /* Non-TSO packet cannot occupy more than
358 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
360 if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
361 m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
366 /* check that only supported TX offloads are requested. */
367 if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
368 (ol_flags & PKT_TX_L4_MASK) ==
370 rte_errno = ENOTSUP;
374 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
375 ret = rte_validate_tx_offload(m);
381 ret = rte_net_intel_cksum_prepare(m);
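/*
 * Usage sketch (illustrative only, not driver code): applications reach the
 * prepare callback above and the transmit callback below through the generic
 * ethdev burst API.  port_id, queue_id and the mbuf array are placeholders.
 */
static __rte_unused uint16_t
vmxnet3_example_tx_burst(uint16_t port_id, uint16_t queue_id,
                         struct rte_mbuf **pkts, uint16_t nb_pkts)
{
        /* fixes up pseudo-header checksums and rejects unsupported requests */
        uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

        /* dispatches to vmxnet3_xmit_pkts() via dev->tx_pkt_burst */
        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}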
392 vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
396 vmxnet3_tx_queue_t *txq = tx_queue;
397 struct vmxnet3_hw *hw = txq->hw;
398 Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
399 uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);
401 if (unlikely(txq->stopped)) {
402 PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
406 /* Free up the comp_descriptors aggressively */
407 vmxnet3_tq_tx_complete(txq);
410 while (nb_tx < nb_pkts) {
411 Vmxnet3_GenericDesc *gdesc;
412 vmxnet3_buf_info_t *tbi;
413 uint32_t first2fill, avail, dw2;
414 struct rte_mbuf *txm = tx_pkts[nb_tx];
415 struct rte_mbuf *m_seg = txm;
417 bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
418 /* # of descriptors needed for a packet. */
419 unsigned count = txm->nb_segs;
421 avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
423 /* Is command ring full? */
424 if (unlikely(avail == 0)) {
425 PMD_TX_LOG(DEBUG, "No free ring descriptors");
426 txq->stats.tx_ring_full++;
427 txq->stats.drop_total += (nb_pkts - nb_tx);
431 /* Command ring is not full but cannot handle the
432 * multi-segmented packet. Let's try the next packet
435 PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
436 "(avail %d needed %d)", avail, count);
437 txq->stats.drop_total++;
439 txq->stats.drop_tso++;
440 rte_pktmbuf_free(txm);
445 /* Drop non-TSO packet that is excessively fragmented */
446 if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
447 PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
448 "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
449 txq->stats.drop_too_many_segs++;
450 txq->stats.drop_total++;
451 rte_pktmbuf_free(txm);
456 if (txm->nb_segs == 1 &&
457 rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
458 struct Vmxnet3_TxDataDesc *tdd;
460 tdd = (struct Vmxnet3_TxDataDesc *)
461 ((uint8 *)txq->data_ring.base +
462 txq->cmd_ring.next2fill *
463 txq->txdata_desc_size);
464 copy_size = rte_pktmbuf_pkt_len(txm);
465 rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
468 /* use the previous gen bit for the SOP desc */
469 dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
470 first2fill = txq->cmd_ring.next2fill;
472 /* Remember the transmit buffer for cleanup */
473 tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
475 /* NB: the following assumes that VMXNET3 maximum
476 * transmit buffer size (16K) is greater than
477 * the maximum mbuf segment size.
479 gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
482 (uint64)txq->cmd_ring.next2fill *
483 txq->txdata_desc_size;
485 rte_cpu_to_le_64(txq->data_ring.basePA +
488 gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
491 gdesc->dword[2] = dw2 | m_seg->data_len;
494 /* move to the next2fill descriptor */
495 vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
497 /* use the right gen for non-SOP desc */
498 dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
499 } while ((m_seg = m_seg->next) != NULL);
501 /* set the last buf_info for the pkt */
503 /* Update the EOP descriptor */
504 gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
506 /* Add VLAN tag if present */
507 gdesc = txq->cmd_ring.base + first2fill;
508 if (txm->ol_flags & PKT_TX_VLAN_PKT) {
510 gdesc->txd.tci = txm->vlan_tci;
514 uint16_t mss = txm->tso_segsz;
518 gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
519 gdesc->txd.om = VMXNET3_OM_TSO;
520 gdesc->txd.msscof = mss;
522 deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
523 } else if (txm->ol_flags & PKT_TX_L4_MASK) {
524 gdesc->txd.om = VMXNET3_OM_CSUM;
525 gdesc->txd.hlen = txm->l2_len + txm->l3_len;
527 switch (txm->ol_flags & PKT_TX_L4_MASK) {
528 case PKT_TX_TCP_CKSUM:
529 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
531 case PKT_TX_UDP_CKSUM:
532 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
535 PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
536 txm->ol_flags & PKT_TX_L4_MASK);
542 gdesc->txd.om = VMXNET3_OM_NONE;
543 gdesc->txd.msscof = 0;
547 /* flip the GEN bit on the SOP */
548 rte_compiler_barrier();
549 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
551 txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
555 PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));
557 if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
558 txq_ctrl->txNumDeferred = 0;
559 /* Notify vSwitch that packets are available. */
560 VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
561 txq->cmd_ring.next2fill);
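/*
 * Sketch of the descriptor hand-off used above (illustrative only): a TX
 * descriptor belongs to the device once its gen bit matches the ring's
 * current gen.  The SOP descriptor is therefore written with the previous
 * gen value and flipped only after the rest of the chain (and the compiler
 * barrier), so the device never sees a partially written packet.  The helper
 * below is hypothetical and merely restates that convention.
 */
static inline int
vmxnet3_txd_is_device_owned(const Vmxnet3_GenericDesc *gd, uint8_t ring_gen)
{
        return !!(gd->dword[2] & VMXNET3_TXD_GEN) == ring_gen;
}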
568 vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
569 struct rte_mbuf *mbuf)
572 struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
573 struct Vmxnet3_RxDesc *rxd =
574 (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
575 vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
578 /* Usually: One HEAD type buf per packet
579 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
580 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
583 /* We use single packet buffer so all heads here */
584 val = VMXNET3_RXD_BTYPE_HEAD;
586 /* All BODY type buffers for 2nd ring */
587 val = VMXNET3_RXD_BTYPE_BODY;
591 * Load the mbuf pointer into the buf_info entry at next2fill;
592 * the buf_info structure is the equivalent of a virtio-virtqueue cookie
595 buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
596 buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
598 /* Load Rx Descriptor with the buffer's GPA */
599 rxd->addr = buf_info->bufPA;
601 /* After this point rxd->addr MUST not be NULL */
603 rxd->len = buf_info->len;
604 /* Flip gen bit at the end to change ownership */
605 rxd->gen = ring->gen;
607 vmxnet3_cmd_ring_adv_next2fill(ring);
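/*
 * Illustrative sketch (not driver code) of the buffer typing described in the
 * comment above: ring 1 only ever carries BODY buffers, while ring 0 would
 * interleave one HEAD per bufs_per_pkt buffers in the general case; this PMD
 * uses a single buffer per packet, so every ring-0 descriptor is a HEAD.
 * The helper name is hypothetical.
 */
static inline uint8_t
vmxnet3_example_buf_btype(const vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
                          uint32_t fill_idx)
{
        if (ring_id == 1)
                return VMXNET3_RXD_BTYPE_BODY;

        return (fill_idx % rxq->hw->bufs_per_pkt) ?
                VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
}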
610 * Allocates mbufs and clusters, and posts Rx descriptors with the buffer details
611 * so that the device can receive packets into those buffers.
613 * Among the two rings, 1st ring contains buffers of type 0 and type 1.
614 * bufs_per_pkt is set such that for non-LRO cases all the buffers required
615 * by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
616 * 2nd ring contains buffers of type 1 alone. The second ring is mostly used
620 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
624 struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
626 while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
627 struct rte_mbuf *mbuf;
629 /* Allocate blank mbuf for the current Rx Descriptor */
630 mbuf = rte_mbuf_raw_alloc(rxq->mp);
631 if (unlikely(mbuf == NULL)) {
632 PMD_RX_LOG(ERR, "Error allocating mbuf");
633 rxq->stats.rx_buf_alloc_failure++;
638 vmxnet3_renew_desc(rxq, ring_id, mbuf);
642 /* Return error only if no buffers are posted at present */
643 if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
650 /* Receive side checksum and other offloads */
652 vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
653 struct rte_mbuf *rxm, const uint8_t sop)
657 /* Offloads set in sop */
659 } else { /* Offloads set in eop */
661 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
662 rxm->ol_flags |= PKT_RX_RSS_HASH;
663 rxm->hash.rss = rcd->rssHash;
666 /* Check for hardware stripped VLAN tag */
668 rxm->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
669 rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
672 /* Check packet type, checksum errors. Only IPv4 for now. */
674 rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
678 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
680 if ((rcd->tcp || rcd->udp) && !rcd->tuc)
681 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
684 rxm->packet_type = RTE_PTYPE_UNKNOWN;
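/*
 * Usage sketch (illustrative only): how a caller of rte_eth_rx_burst() might
 * consume the flags vmxnet3_rx_offload() sets above.  The function name is
 * hypothetical.
 */
static __rte_unused int
vmxnet3_example_rx_cksum_ok(const struct rte_mbuf *m)
{
        /*
         * PKT_RX_IP_CKSUM_BAD / PKT_RX_L4_CKSUM_BAD carry the device's
         * checksum verdict; PKT_RX_RSS_HASH guards m->hash.rss and
         * PKT_RX_VLAN_STRIPPED guards m->vlan_tci in the same way.
         */
        return (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD)) == 0;
}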
690 * Process the Rx Completion Ring of the given vmxnet3_rx_queue
691 * for nb_pkts burst and return the number of packets received
694 vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
697 uint32_t nb_rxd, idx;
699 vmxnet3_rx_queue_t *rxq;
700 Vmxnet3_RxCompDesc *rcd;
701 vmxnet3_buf_info_t *rbi;
703 struct rte_mbuf *rxm = NULL;
704 struct vmxnet3_hw *hw;
714 rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
716 if (unlikely(rxq->stopped)) {
717 PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
721 while (rcd->gen == rxq->comp_ring.gen) {
722 struct rte_mbuf *newm;
724 if (nb_rx >= nb_pkts)
727 newm = rte_mbuf_raw_alloc(rxq->mp);
728 if (unlikely(newm == NULL)) {
729 PMD_RX_LOG(ERR, "Error allocating mbuf");
730 rxq->stats.rx_buf_alloc_failure++;
735 ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
736 rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
737 RTE_SET_USED(rxd); /* used only for assert when enabled */
738 rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
740 PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
742 RTE_ASSERT(rcd->len <= rxd->len);
745 /* Get the packet buffer pointer from buf_info */
748 /* Clear descriptor associated buf_info to be reused */
752 /* Record the index of the descriptor we just consumed */
753 rxq->cmd_ring[ring_idx].next2comp = idx;
755 /* For RCD with EOP set, check if there is frame error */
756 if (unlikely(rcd->eop && rcd->err)) {
757 rxq->stats.drop_total++;
758 rxq->stats.drop_err++;
761 rxq->stats.drop_fcs++;
762 PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
764 PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
765 (int)(rcd - (struct Vmxnet3_RxCompDesc *)
766 rxq->comp_ring.base), rcd->rxdIdx);
767 rte_pktmbuf_free_seg(rxm);
768 if (rxq->start_seg) {
769 struct rte_mbuf *start = rxq->start_seg;
771 rxq->start_seg = NULL;
772 rte_pktmbuf_free(start);
777 /* Initialize newly received packet buffer */
778 rxm->port = rxq->port_id;
781 rxm->pkt_len = (uint16_t)rcd->len;
782 rxm->data_len = (uint16_t)rcd->len;
783 rxm->data_off = RTE_PKTMBUF_HEADROOM;
788 * If this is the first buffer of the received packet,
789 * set the pointer to the first mbuf of the packet
790 * Otherwise, update the total length and the number of segments
791 * of the current scattered packet, and update the pointer to
792 * the last mbuf of the current packet.
795 RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
797 if (unlikely(rcd->len == 0)) {
798 RTE_ASSERT(rcd->eop);
801 "Rx buf was skipped. rxring[%d][%d]",
803 rte_pktmbuf_free_seg(rxm);
807 if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
808 uint8_t *rdd = rxq->data_ring.base +
809 idx * rxq->data_desc_size;
811 RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
812 rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
816 rxq->start_seg = rxm;
817 vmxnet3_rx_offload(hw, rcd, rxm, 1);
819 struct rte_mbuf *start = rxq->start_seg;
821 RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
823 start->pkt_len += rxm->data_len;
826 rxq->last_seg->next = rxm;
831 struct rte_mbuf *start = rxq->start_seg;
833 vmxnet3_rx_offload(hw, rcd, start, 0);
834 rx_pkts[nb_rx++] = start;
835 rxq->start_seg = NULL;
839 rxq->cmd_ring[ring_idx].next2comp = idx;
840 VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
841 rxq->cmd_ring[ring_idx].size);
843 /* It's time to renew descriptors */
844 vmxnet3_renew_desc(rxq, ring_idx, newm);
845 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
846 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
847 rxq->cmd_ring[ring_idx].next2fill);
850 /* Advance to the next descriptor in comp_ring */
851 vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
853 rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
855 if (nb_rxd > rxq->cmd_ring[0].size) {
856 PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
857 " relinquish control.");
862 if (unlikely(nb_rxd == 0)) {
864 for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
865 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
866 if (unlikely(avail > 0)) {
867 /* try to alloc new buf and renew descriptors */
868 vmxnet3_post_rx_bufs(rxq, ring_idx);
871 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
872 for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
873 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
874 rxq->cmd_ring[ring_idx].next2fill);
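/*
 * Usage sketch (illustrative only): vmxnet3_recv_pkts() above is invoked
 * through dev->rx_pkt_burst; a typical polling loop looks roughly like this.
 * The burst size and variable names are placeholders.
 */
static __rte_unused void
vmxnet3_example_rx_poll(uint16_t port_id, uint16_t queue_id)
{
        struct rte_mbuf *bufs[32];      /* burst size is an arbitrary example */
        uint16_t nb, i;

        /* dispatches to vmxnet3_recv_pkts() via dev->rx_pkt_burst */
        nb = rte_eth_rx_burst(port_id, queue_id, bufs, RTE_DIM(bufs));
        for (i = 0; i < nb; i++)
                rte_pktmbuf_free(bufs[i]);      /* a real application would process them */
}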
883 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
886 unsigned int socket_id,
887 const struct rte_eth_txconf *tx_conf)
889 struct vmxnet3_hw *hw = dev->data->dev_private;
890 const struct rte_memzone *mz;
891 struct vmxnet3_tx_queue *txq;
892 struct vmxnet3_cmd_ring *ring;
893 struct vmxnet3_comp_ring *comp_ring;
894 struct vmxnet3_data_ring *data_ring;
897 PMD_INIT_FUNC_TRACE();
899 if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
900 ETH_TXQ_FLAGS_NOXSUMSCTP) {
901 PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
905 txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
906 RTE_CACHE_LINE_SIZE);
908 PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
912 txq->queue_id = queue_idx;
913 txq->port_id = dev->data->port_id;
914 txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
916 txq->qid = queue_idx;
918 txq->txdata_desc_size = hw->txdata_desc_size;
920 ring = &txq->cmd_ring;
921 comp_ring = &txq->comp_ring;
922 data_ring = &txq->data_ring;
924 /* Tx vmxnet ring length should be between 512-4096 */
925 if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
926 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
927 VMXNET3_DEF_TX_RING_SIZE);
929 } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
930 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
931 VMXNET3_TX_RING_MAX_SIZE);
934 ring->size = nb_desc;
935 ring->size &= ~VMXNET3_RING_SIZE_MASK;
937 comp_ring->size = data_ring->size = ring->size;
939 /* Tx vmxnet rings structure initialization */
942 ring->gen = VMXNET3_INIT_GEN;
943 comp_ring->next2proc = 0;
944 comp_ring->gen = VMXNET3_INIT_GEN;
946 size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
947 size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
948 size += txq->txdata_desc_size * data_ring->size;
950 mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
951 VMXNET3_RING_BA_ALIGN, socket_id);
953 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
957 memset(mz->addr, 0, mz->len);
959 /* cmd_ring initialization */
960 ring->base = mz->addr;
961 ring->basePA = mz->iova;
963 /* comp_ring initialization */
964 comp_ring->base = ring->base + ring->size;
965 comp_ring->basePA = ring->basePA +
966 (sizeof(struct Vmxnet3_TxDesc) * ring->size);
968 /* data_ring initialization */
969 data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
970 data_ring->basePA = comp_ring->basePA +
971 (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
973 /* cmd_ring0 buf_info allocation */
974 ring->buf_info = rte_zmalloc("tx_ring_buf_info",
975 ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
976 if (ring->buf_info == NULL) {
977 PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure");
981 /* Update the data portion with txq */
982 dev->data->tx_queues[queue_idx] = txq;
988 vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
991 unsigned int socket_id,
992 __rte_unused const struct rte_eth_rxconf *rx_conf,
993 struct rte_mempool *mp)
995 const struct rte_memzone *mz;
996 struct vmxnet3_rx_queue *rxq;
997 struct vmxnet3_hw *hw = dev->data->dev_private;
998 struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
999 struct vmxnet3_comp_ring *comp_ring;
1000 struct vmxnet3_rx_data_ring *data_ring;
1005 PMD_INIT_FUNC_TRACE();
1007 rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
1008 RTE_CACHE_LINE_SIZE);
1010 PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
1015 rxq->queue_id = queue_idx;
1016 rxq->port_id = dev->data->port_id;
1017 rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
1019 rxq->qid1 = queue_idx;
1020 rxq->qid2 = queue_idx + hw->num_rx_queues;
1021 rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
1022 rxq->data_desc_size = hw->rxdata_desc_size;
1023 rxq->stopped = TRUE;
1025 ring0 = &rxq->cmd_ring[0];
1026 ring1 = &rxq->cmd_ring[1];
1027 comp_ring = &rxq->comp_ring;
1028 data_ring = &rxq->data_ring;
1030 /* Rx vmxnet rings length should be between VMXNET3_DEF_RX_RING_SIZE and VMXNET3_RX_RING_MAX_SIZE */
1031 if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
1032 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: %u", VMXNET3_DEF_RX_RING_SIZE);
1034 } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
1035 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: %u", VMXNET3_RX_RING_MAX_SIZE);
1038 ring0->size = nb_desc;
1039 ring0->size &= ~VMXNET3_RING_SIZE_MASK;
1040 ring1->size = ring0->size;
1043 comp_ring->size = ring0->size + ring1->size;
1044 data_ring->size = ring0->size;
1046 /* Rx vmxnet rings structure initialization */
1047 ring0->next2fill = 0;
1048 ring1->next2fill = 0;
1049 ring0->next2comp = 0;
1050 ring1->next2comp = 0;
1051 ring0->gen = VMXNET3_INIT_GEN;
1052 ring1->gen = VMXNET3_INIT_GEN;
1053 comp_ring->next2proc = 0;
1054 comp_ring->gen = VMXNET3_INIT_GEN;
1056 size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
1057 size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1058 if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
1059 size += rxq->data_desc_size * data_ring->size;
1061 mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
1062 VMXNET3_RING_BA_ALIGN, socket_id);
1064 PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
1068 memset(mz->addr, 0, mz->len);
1070 /* cmd_ring0 initialization */
1071 ring0->base = mz->addr;
1072 ring0->basePA = mz->iova;
1074 /* cmd_ring1 initialization */
1075 ring1->base = ring0->base + ring0->size;
1076 ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
1078 /* comp_ring initialization */
1079 comp_ring->base = ring1->base + ring1->size;
1080 comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
1083 /* data_ring initialization */
1084 if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
1086 (uint8_t *)(comp_ring->base + comp_ring->size);
1087 data_ring->basePA = comp_ring->basePA +
1088 sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1091 /* cmd_ring0-cmd_ring1 buf_info allocation */
1092 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
1094 ring = &rxq->cmd_ring[i];
1096 snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
1098 ring->buf_info = rte_zmalloc(mem_name,
1099 ring->size * sizeof(vmxnet3_buf_info_t),
1100 RTE_CACHE_LINE_SIZE);
1101 if (ring->buf_info == NULL) {
1102 PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
1107 /* Update the data portion with rxq */
1108 dev->data->rx_queues[queue_idx] = rxq;
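/*
 * Setup sketch (illustrative only): both queue setup routines above are
 * reached through the generic ethdev API, roughly as below.  Queue 0, the
 * default ring sizes and a caller-provided mempool are placeholder choices.
 */
static __rte_unused int
vmxnet3_example_queue_setup(uint16_t port_id, struct rte_mempool *mb_pool)
{
        int ret;

        /* ends up in vmxnet3_dev_tx_queue_setup() */
        ret = rte_eth_tx_queue_setup(port_id, 0, VMXNET3_DEF_TX_RING_SIZE,
                                     rte_eth_dev_socket_id(port_id), NULL);
        if (ret != 0)
                return ret;

        /* ends up in vmxnet3_dev_rx_queue_setup() */
        return rte_eth_rx_queue_setup(port_id, 0, VMXNET3_DEF_RX_RING_SIZE,
                                      rte_eth_dev_socket_id(port_id), NULL,
                                      mb_pool);
}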
1114 * Initializes the Receive Unit:
1115 * loads mbufs into the Rx queues in advance.
1118 vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
1120 struct vmxnet3_hw *hw = dev->data->dev_private;
1125 PMD_INIT_FUNC_TRACE();
1127 for (i = 0; i < hw->num_rx_queues; i++) {
1128 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
1130 for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
1131 /* Post buffers to completely fill command ring j */
1132 ret = vmxnet3_post_rx_bufs(rxq, j);
1135 "ERROR: Posting Rxq: %d buffers ring: %d",
1140 * Update the device with the next2fill index so that it can
1141 * use the newly posted mbufs for incoming packets.
1143 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1144 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
1145 rxq->cmd_ring[j].next2fill);
1148 rxq->stopped = FALSE;
1149 rxq->start_seg = NULL;
1152 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1153 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1155 txq->stopped = FALSE;
1161 static uint8_t rss_intel_key[40] = {
1162 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1163 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1164 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1165 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1166 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1170 * Configure RSS feature
1173 vmxnet3_rss_configure(struct rte_eth_dev *dev)
1175 struct vmxnet3_hw *hw = dev->data->dev_private;
1176 struct VMXNET3_RSSConf *dev_rss_conf;
1177 struct rte_eth_rss_conf *port_rss_conf;
1181 PMD_INIT_FUNC_TRACE();
1183 dev_rss_conf = hw->rss_conf;
1184 port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1186 /* loading hashFunc */
1187 dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
1188 /* loading hashKeySize */
1189 dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
1190 /* loading indTableSize: must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
1191 dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
1193 if (port_rss_conf->rss_key == NULL) {
1194 /* Default hash key */
1195 port_rss_conf->rss_key = rss_intel_key;
1198 /* loading hashKey */
1199 memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
1200 dev_rss_conf->hashKeySize);
1202 /* loading indTable */
1203 for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
1204 if (j == dev->data->nb_rx_queues)
1206 dev_rss_conf->indTable[i] = j;
1209 /* loading hashType */
1210 dev_rss_conf->hashType = 0;
1211 rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
1212 if (rss_hf & ETH_RSS_IPV4)
1213 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
1214 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1215 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
1216 if (rss_hf & ETH_RSS_IPV6)
1217 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
1218 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1219 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
1221 return VMXNET3_SUCCESS;
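/*
 * Usage sketch (illustrative only): the RSS fields programmed above are
 * driven by the application's port configuration, roughly as below.  The
 * initializer values are placeholders.
 */
static __rte_unused int
vmxnet3_example_enable_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
        struct rte_eth_conf conf;

        memset(&conf, 0, sizeof(conf));
        /* request RSS; vmxnet3_rss_configure() masks rss_hf against
         * VMXNET3_RSS_OFFLOAD_ALL and falls back to rss_intel_key when no
         * key is supplied.
         */
        conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
        conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_IP | ETH_RSS_TCP;
        conf.rx_adv_conf.rss_conf.rss_key = NULL;

        return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
}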