1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2015 Intel Corporation
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
18 #include <rte_cycles.h>
20 #include <rte_debug.h>
21 #include <rte_interrupts.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
34 #include <rte_ether.h>
35 #include <rte_ethdev_driver.h>
36 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
42 #include <rte_errno.h>
45 #include "base/vmxnet3_defs.h"
46 #include "vmxnet3_ring.h"
48 #include "vmxnet3_logs.h"
49 #include "vmxnet3_ethdev.h"
51 #define VMXNET3_TX_OFFLOAD_MASK ( \
56 #define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
57 (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
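/* BAR0 registers through which the driver publishes the Rx producer index,
 * one register per hardware Rx command ring.
 */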
59 static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
61 static int vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t*, uint8_t);
62 static void vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *);
63 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
64 static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
65 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
68 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
70 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
78 "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
79 rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
81 "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
82 (unsigned long)rxq->cmd_ring[0].basePA,
83 (unsigned long)rxq->cmd_ring[1].basePA,
84 (unsigned long)rxq->comp_ring.basePA);
86 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
88 "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u",
89 (uint32_t)rxq->cmd_ring[0].size, avail,
90 rxq->comp_ring.next2proc,
91 rxq->cmd_ring[0].size - avail);
93 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
94 PMD_RX_LOG(DEBUG, "RXQ:cmd1: size=%u; free=%u; next2proc=%u; queued=%u",
95 (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
96 rxq->cmd_ring[1].size - avail);
101 vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
108 PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
109 txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
110 PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
111 (unsigned long)txq->cmd_ring.basePA,
112 (unsigned long)txq->comp_ring.basePA,
113 (unsigned long)txq->data_ring.basePA);
115 avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
116 PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u",
117 (uint32_t)txq->cmd_ring.size, avail,
118 txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
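/* Free any mbufs still attached to a Tx command ring between next2comp and
 * next2fill; only safe once the device has been quiesced.
 */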
123 vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
125 while (ring->next2comp != ring->next2fill) {
126 /* No need to worry about desc ownership, device is quiesced by now. */
127 vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
130 rte_pktmbuf_free(buf_info->m);
135 vmxnet3_cmd_ring_adv_next2comp(ring);
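/* Free the mbuf attached to every descriptor of an Rx command ring. */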
140 vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
144 for (i = 0; i < ring->size; i++) {
145 /* No need to worry about desc ownership, device is quiesced by now. */
146 vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
149 rte_pktmbuf_free_seg(buf_info->m);
154 vmxnet3_cmd_ring_adv_next2comp(ring);
159 vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
161 rte_free(ring->buf_info);
162 ring->buf_info = NULL;
166 vmxnet3_dev_tx_queue_release(void *txq)
168 vmxnet3_tx_queue_t *tq = txq;
172 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
173 /* Release the cmd_ring */
174 vmxnet3_cmd_ring_release(&tq->cmd_ring);
175 /* Release the memzone */
176 rte_memzone_free(tq->mz);
177 /* Release the queue */
183 vmxnet3_dev_rx_queue_release(void *rxq)
186 vmxnet3_rx_queue_t *rq = rxq;
190 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
191 vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
193 /* Release both the cmd_rings */
194 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
195 vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
197 /* Release the memzone */
198 rte_memzone_free(rq->mz);
200 /* Release the queue */
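/* Return a Tx queue to its initial state: free in-flight mbufs, reset the
 * ring indices and gen bits, and zero the descriptor memory.
 */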
206 vmxnet3_dev_tx_queue_reset(void *txq)
208 vmxnet3_tx_queue_t *tq = txq;
209 struct vmxnet3_cmd_ring *ring = &tq->cmd_ring;
210 struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
211 struct vmxnet3_data_ring *data_ring = &tq->data_ring;
215 /* Release the cmd_ring mbufs */
216 vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
219 /* Tx vmxnet rings structure initialization */
222 ring->gen = VMXNET3_INIT_GEN;
223 comp_ring->next2proc = 0;
224 comp_ring->gen = VMXNET3_INIT_GEN;
226 size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
227 size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
228 size += tq->txdata_desc_size * data_ring->size;
230 memset(ring->base, 0, size);
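/* Likewise for an Rx queue: both command rings, the completion ring and, on
 * v3 devices, the Rx data ring are reset and their descriptors zeroed.
 */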
234 vmxnet3_dev_rx_queue_reset(void *rxq)
237 vmxnet3_rx_queue_t *rq = rxq;
238 struct vmxnet3_hw *hw = rq->hw;
239 struct vmxnet3_cmd_ring *ring0, *ring1;
240 struct vmxnet3_comp_ring *comp_ring;
241 struct vmxnet3_rx_data_ring *data_ring = &rq->data_ring;
244 /* Release both the cmd_rings mbufs */
245 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
246 vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
248 ring0 = &rq->cmd_ring[0];
249 ring1 = &rq->cmd_ring[1];
250 comp_ring = &rq->comp_ring;
252 /* Rx vmxnet rings structure initialization */
253 ring0->next2fill = 0;
254 ring1->next2fill = 0;
255 ring0->next2comp = 0;
256 ring1->next2comp = 0;
257 ring0->gen = VMXNET3_INIT_GEN;
258 ring1->gen = VMXNET3_INIT_GEN;
259 comp_ring->next2proc = 0;
260 comp_ring->gen = VMXNET3_INIT_GEN;
262 size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
263 size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
264 if (VMXNET3_VERSION_GE_3(hw) && rq->data_desc_size)
265 size += rq->data_desc_size * data_ring->size;
267 memset(ring0->base, 0, size);
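/* Reset every configured Tx and Rx queue back to its initial state. */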
271 vmxnet3_dev_clear_queues(struct rte_eth_dev *dev)
275 PMD_INIT_FUNC_TRACE();
277 for (i = 0; i < dev->data->nb_tx_queues; i++) {
278 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
282 vmxnet3_dev_tx_queue_reset(txq);
286 for (i = 0; i < dev->data->nb_rx_queues; i++) {
287 struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
291 vmxnet3_dev_rx_queue_reset(rxq);
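/* Free the mbuf of a completed packet (identified by the index of its EOP
 * descriptor) and reclaim all command ring descriptors up to and including
 * that index; returns the number of descriptors released.
 */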
297 vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
300 struct rte_mbuf *mbuf;
302 /* Release cmd_ring descriptor and free mbuf */
303 RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
305 mbuf = txq->cmd_ring.buf_info[eop_idx].m;
307 rte_panic("EOP desc does not point to a valid mbuf");
308 rte_pktmbuf_free(mbuf);
310 txq->cmd_ring.buf_info[eop_idx].m = NULL;
312 while (txq->cmd_ring.next2comp != eop_idx) {
313 /* no out-of-order completion */
314 RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
315 vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
319 /* Mark the txd for which tcd was generated as completed */
320 vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
322 return completed + 1;
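/* Drain the Tx completion ring, releasing every packet the device reports
 * as transmitted.
 */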
326 vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
329 vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
330 struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
331 (comp_ring->base + comp_ring->next2proc);
333 while (tcd->gen == comp_ring->gen) {
334 completed += vmxnet3_unmap_pkt(tcd->txdIdx, txq);
336 vmxnet3_comp_ring_adv_next2proc(comp_ring);
337 tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
338 comp_ring->next2proc);
341 PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
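/* Tx prepare callback: reject non-TSO packets that would need more than
 * VMXNET3_MAX_TXD_PER_PKT descriptors and packets requesting offloads the
 * device cannot perform, setting rte_errno accordingly, then run the
 * generic checksum preparation on the remaining packets.
 */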
345 vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
353 for (i = 0; i != nb_pkts; i++) {
355 ol_flags = m->ol_flags;
357 /* Non-TSO packet cannot occupy more than
358 * VMXNET3_MAX_TXD_PER_PKT TX descriptors.
360 if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
361 m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
366 /* check that only supported TX offloads are requested. */
367 if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
368 (ol_flags & PKT_TX_L4_MASK) ==
370 rte_errno = ENOTSUP;
374 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
375 ret = rte_validate_tx_offload(m);
381 ret = rte_net_intel_cksum_prepare(m);
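/* Burst transmit: reserve command ring descriptors for each packet (copying
 * small single-segment frames into the Tx data ring), fill one descriptor
 * per mbuf segment, apply TSO/checksum/VLAN offload fields on the SOP
 * descriptor, and hand the chain to the device by flipping the SOP gen bit.
 */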
392 vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
396 vmxnet3_tx_queue_t *txq = tx_queue;
397 struct vmxnet3_hw *hw = txq->hw;
398 Vmxnet3_TxQueueCtrl *txq_ctrl = &txq->shared->ctrl;
399 uint32_t deferred = rte_le_to_cpu_32(txq_ctrl->txNumDeferred);
401 if (unlikely(txq->stopped)) {
402 PMD_TX_LOG(DEBUG, "Tx queue is stopped.");
406 /* Free up the comp_descriptors aggressively */
407 vmxnet3_tq_tx_complete(txq);
410 while (nb_tx < nb_pkts) {
411 Vmxnet3_GenericDesc *gdesc;
412 vmxnet3_buf_info_t *tbi;
413 uint32_t first2fill, avail, dw2;
414 struct rte_mbuf *txm = tx_pkts[nb_tx];
415 struct rte_mbuf *m_seg = txm;
417 bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
418 /* # of descriptors needed for a packet. */
419 unsigned count = txm->nb_segs;
421 avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
423 /* Is command ring full? */
424 if (unlikely(avail == 0)) {
425 PMD_TX_LOG(DEBUG, "No free ring descriptors");
426 txq->stats.tx_ring_full++;
427 txq->stats.drop_total += (nb_pkts - nb_tx);
431 /* Command ring is not full but cannot handle the
432 * multi-segmented packet. Let's try the next packet
435 PMD_TX_LOG(DEBUG, "Running out of ring descriptors "
436 "(avail %d needed %d)", avail, count);
437 txq->stats.drop_total++;
439 txq->stats.drop_tso++;
440 rte_pktmbuf_free(txm);
445 /* Drop non-TSO packet that is excessively fragmented */
446 if (unlikely(!tso && count > VMXNET3_MAX_TXD_PER_PKT)) {
447 PMD_TX_LOG(ERR, "Non-TSO packet cannot occupy more than %d tx "
448 "descriptors. Packet dropped.", VMXNET3_MAX_TXD_PER_PKT);
449 txq->stats.drop_too_many_segs++;
450 txq->stats.drop_total++;
451 rte_pktmbuf_free(txm);
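/* Single-segment packets that fit in a data ring slot are copied into the
 * Tx data ring so the device fetches the payload from ring memory rather
 * than from the mbuf.
 */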
456 if (txm->nb_segs == 1 &&
457 rte_pktmbuf_pkt_len(txm) <= txq->txdata_desc_size) {
458 struct Vmxnet3_TxDataDesc *tdd;
460 tdd = (struct Vmxnet3_TxDataDesc *)
461 ((uint8 *)txq->data_ring.base +
462 txq->cmd_ring.next2fill *
463 txq->txdata_desc_size);
464 copy_size = rte_pktmbuf_pkt_len(txm);
465 rte_memcpy(tdd->data, rte_pktmbuf_mtod(txm, char *), copy_size);
468 /* use the previous gen bit for the SOP desc */
469 dw2 = (txq->cmd_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
470 first2fill = txq->cmd_ring.next2fill;
472 /* Remember the transmit buffer for cleanup */
473 tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
475 /* NB: the following assumes that VMXNET3 maximum
476 * transmit buffer size (16K) is greater than the
477 * maximum size of an mbuf segment.
479 gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
482 (uint64)txq->cmd_ring.next2fill *
483 txq->txdata_desc_size;
485 rte_cpu_to_le_64(txq->data_ring.basePA +
488 gdesc->txd.addr = rte_mbuf_data_iova(m_seg);
491 gdesc->dword[2] = dw2 | m_seg->data_len;
494 /* move to the next2fill descriptor */
495 vmxnet3_cmd_ring_adv_next2fill(&txq->cmd_ring);
497 /* use the right gen for non-SOP desc */
498 dw2 = txq->cmd_ring.gen << VMXNET3_TXD_GEN_SHIFT;
499 } while ((m_seg = m_seg->next) != NULL);
501 /* set the last buf_info for the pkt */
503 /* Update the EOP descriptor */
504 gdesc->dword[3] |= VMXNET3_TXD_EOP | VMXNET3_TXD_CQ;
506 /* Add VLAN tag if present */
507 gdesc = txq->cmd_ring.base + first2fill;
508 if (txm->ol_flags & PKT_TX_VLAN_PKT) {
510 gdesc->txd.tci = txm->vlan_tci;
514 uint16_t mss = txm->tso_segsz;
518 gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
519 gdesc->txd.om = VMXNET3_OM_TSO;
520 gdesc->txd.msscof = mss;
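/* The device will emit ceil((pkt_len - hlen) / mss) segments for this TSO
 * packet; count them all against the Tx doorbell threshold.
 */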
522 deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
523 } else if (txm->ol_flags & PKT_TX_L4_MASK) {
524 gdesc->txd.om = VMXNET3_OM_CSUM;
525 gdesc->txd.hlen = txm->l2_len + txm->l3_len;
527 switch (txm->ol_flags & PKT_TX_L4_MASK) {
528 case PKT_TX_TCP_CKSUM:
529 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
531 case PKT_TX_UDP_CKSUM:
532 gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
535 PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
536 txm->ol_flags & PKT_TX_L4_MASK);
542 gdesc->txd.om = VMXNET3_OM_NONE;
543 gdesc->txd.msscof = 0;
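/* All descriptor fields for this packet are now written; only the SOP
 * descriptor still carries the previous gen value, so the device has not
 * yet seen the chain.
 */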
547 /* flip the GEN bit on the SOP */
548 rte_compiler_barrier();
549 gdesc->dword[2] ^= VMXNET3_TXD_GEN;
551 txq_ctrl->txNumDeferred = rte_cpu_to_le_32(deferred);
555 PMD_TX_LOG(DEBUG, "vmxnet3 txThreshold: %u", rte_le_to_cpu_32(txq_ctrl->txThreshold));
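/* Write the TXPROD doorbell only once enough packets have been deferred to
 * cross the threshold kept in the shared queue control area; this batches
 * the MMIO writes.
 */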
557 if (deferred >= rte_le_to_cpu_32(txq_ctrl->txThreshold)) {
558 txq_ctrl->txNumDeferred = 0;
559 /* Notify vSwitch that packets are available. */
560 VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
561 txq->cmd_ring.next2fill);
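/* Attach one mbuf to the next free descriptor of the given Rx command ring:
 * record it in buf_info, program the descriptor with the buffer address and
 * length, and pass ownership to the device by setting the gen bit.
 */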
568 vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
569 struct rte_mbuf *mbuf)
572 struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
573 struct Vmxnet3_RxDesc *rxd =
574 (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
575 vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
578 /* Usually: One HEAD type buf per packet
579 * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
580 * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
583 /* We use single packet buffer so all heads here */
584 val = VMXNET3_RXD_BTYPE_HEAD;
586 /* All BODY type buffers for 2nd ring */
587 val = VMXNET3_RXD_BTYPE_BODY;
591 * Load the mbuf pointer into buf_info[next2fill];
592 * the buf_info structure is the equivalent of the cookie in a virtio virtqueue
595 buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
596 buf_info->bufPA = rte_mbuf_data_iova_default(mbuf);
598 /* Load Rx Descriptor with the buffer's GPA */
599 rxd->addr = buf_info->bufPA;
601 /* After this point rxd->addr MUST not be NULL */
603 rxd->len = buf_info->len;
604 /* Flip gen bit at the end to change ownership */
605 rxd->gen = ring->gen;
607 vmxnet3_cmd_ring_adv_next2fill(ring);
610 * Allocates mbufs and clusters. Post rx descriptors with buffer details
611 * so that device can receive packets in those buffers.
613 * Among the two rings, the 1st ring contains buffers of type 0 and type 1.
614 * bufs_per_pkt is set such that for non-LRO cases all the buffers required
615 * by a frame will fit in the 1st ring (1st buf of type0 and rest of type1).
616 * The 2nd ring contains buffers of type 1 alone and is mostly used for LRO.
620 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
624 struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
626 while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
627 struct rte_mbuf *mbuf;
629 /* Allocate blank mbuf for the current Rx Descriptor */
630 mbuf = rte_mbuf_raw_alloc(rxq->mp);
631 if (unlikely(mbuf == NULL)) {
632 PMD_RX_LOG(ERR, "Error allocating mbuf");
633 rxq->stats.rx_buf_alloc_failure++;
638 vmxnet3_renew_desc(rxq, ring_id, mbuf);
642 /* Return error only if no buffers are posted at present */
643 if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
650 /* Receive side checksum and other offloads */
652 vmxnet3_rx_offload(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
653 struct rte_mbuf *rxm, const uint8_t sop)
657 /* Offloads set in sop */
660 if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
661 rxm->ol_flags |= PKT_RX_RSS_HASH;
662 rxm->hash.rss = rcd->rssHash;
665 /* Check packet type, checksum errors. Only IPv4 for now. */
667 rxm->packet_type = RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
671 rxm->ol_flags |= PKT_RX_IP_CKSUM_BAD;
673 if ((rcd->tcp || rcd->udp) && !rcd->tuc)
674 rxm->ol_flags |= PKT_RX_L4_CKSUM_BAD;
677 rxm->packet_type = RTE_PTYPE_UNKNOWN;
679 } else { /* Offloads set in eop */
680 /* Check for hardware stripped VLAN tag */
682 rxm->ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
683 rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
689 * Process the Rx completion ring of the given vmxnet3_rx_queue
690 * for a burst of up to nb_pkts packets and return the number of packets received
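* The completion ring is scanned while its gen bit matches; SOP/EOP
* completions are reassembled into multi-segment mbufs via start_seg and
* last_seg, and every consumed descriptor is immediately renewed with a
* fresh mbuf.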
693 vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
696 uint32_t nb_rxd, idx;
698 vmxnet3_rx_queue_t *rxq;
699 Vmxnet3_RxCompDesc *rcd;
700 vmxnet3_buf_info_t *rbi;
702 struct rte_mbuf *rxm = NULL;
703 struct vmxnet3_hw *hw;
713 rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
715 if (unlikely(rxq->stopped)) {
716 PMD_RX_LOG(DEBUG, "Rx queue is stopped.");
720 while (rcd->gen == rxq->comp_ring.gen) {
721 struct rte_mbuf *newm;
723 if (nb_rx >= nb_pkts)
726 newm = rte_mbuf_raw_alloc(rxq->mp);
727 if (unlikely(newm == NULL)) {
728 PMD_RX_LOG(ERR, "Error allocating mbuf");
729 rxq->stats.rx_buf_alloc_failure++;
734 ring_idx = vmxnet3_get_ring_idx(hw, rcd->rqID);
735 rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
736 RTE_SET_USED(rxd); /* used only for assert when enabled */
737 rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
739 PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
741 RTE_ASSERT(rcd->len <= rxd->len);
744 /* Get the packet buffer pointer from buf_info */
747 /* Clear descriptor associated buf_info to be reused */
751 /* Update the index that we received a packet */
752 rxq->cmd_ring[ring_idx].next2comp = idx;
754 /* For RCD with EOP set, check if there is frame error */
755 if (unlikely(rcd->eop && rcd->err)) {
756 rxq->stats.drop_total++;
757 rxq->stats.drop_err++;
760 rxq->stats.drop_fcs++;
761 PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.");
763 PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d",
764 (int)(rcd - (struct Vmxnet3_RxCompDesc *)
765 rxq->comp_ring.base), rcd->rxdIdx);
766 rte_pktmbuf_free_seg(rxm);
767 if (rxq->start_seg) {
768 struct rte_mbuf *start = rxq->start_seg;
770 rxq->start_seg = NULL;
771 rte_pktmbuf_free(start);
776 /* Initialize newly received packet buffer */
777 rxm->port = rxq->port_id;
780 rxm->pkt_len = (uint16_t)rcd->len;
781 rxm->data_len = (uint16_t)rcd->len;
782 rxm->data_off = RTE_PKTMBUF_HEADROOM;
787 * If this is the first buffer of the received packet,
788 * set the pointer to the first mbuf of the packet
789 * Otherwise, update the total length and the number of segments
790 * of the current scattered packet, and update the pointer to
791 * the last mbuf of the current packet.
794 RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
796 if (unlikely(rcd->len == 0)) {
797 RTE_ASSERT(rcd->eop);
800 "Rx buf was skipped. rxring[%d][%d]",
802 rte_pktmbuf_free_seg(rxm);
806 if (vmxnet3_rx_data_ring(hw, rcd->rqID)) {
807 uint8_t *rdd = rxq->data_ring.base +
808 idx * rxq->data_desc_size;
810 RTE_ASSERT(VMXNET3_VERSION_GE_3(hw));
811 rte_memcpy(rte_pktmbuf_mtod(rxm, char *),
815 rxq->start_seg = rxm;
816 vmxnet3_rx_offload(hw, rcd, rxm, 1);
818 struct rte_mbuf *start = rxq->start_seg;
820 RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
822 start->pkt_len += rxm->data_len;
825 rxq->last_seg->next = rxm;
830 struct rte_mbuf *start = rxq->start_seg;
832 vmxnet3_rx_offload(hw, rcd, start, 0);
833 rx_pkts[nb_rx++] = start;
834 rxq->start_seg = NULL;
838 rxq->cmd_ring[ring_idx].next2comp = idx;
839 VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
840 rxq->cmd_ring[ring_idx].size);
842 /* It's time to renew descriptors */
843 vmxnet3_renew_desc(rxq, ring_idx, newm);
844 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
845 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
846 rxq->cmd_ring[ring_idx].next2fill);
849 /* Advance to the next descriptor in comp_ring */
850 vmxnet3_comp_ring_adv_next2proc(&rxq->comp_ring);
852 rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
854 if (nb_rxd > rxq->cmd_ring[0].size) {
855 PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
856 " relinquish control.");
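/* If nothing was received in this burst, the rings may have run dry after
 * earlier mbuf allocation failures; try to replenish them now and, if
 * needed, notify the device of the new producer index.
 */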
861 if (unlikely(nb_rxd == 0)) {
863 for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
864 avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[ring_idx]);
865 if (unlikely(avail > 0)) {
866 /* try to alloc new buf and renew descriptors */
867 vmxnet3_post_rx_bufs(rxq, ring_idx);
870 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
871 for (ring_idx = 0; ring_idx < VMXNET3_RX_CMDRING_SIZE; ring_idx++) {
872 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
873 rxq->cmd_ring[ring_idx].next2fill);
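/* Set up a Tx queue: validate the ring size, reserve a single DMA memzone
 * holding the command ring, completion ring and data ring back to back, and
 * allocate the buf_info array that shadows the command ring.
 */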
882 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
885 unsigned int socket_id,
886 const struct rte_eth_txconf *tx_conf)
888 struct vmxnet3_hw *hw = dev->data->dev_private;
889 const struct rte_memzone *mz;
890 struct vmxnet3_tx_queue *txq;
891 struct vmxnet3_cmd_ring *ring;
892 struct vmxnet3_comp_ring *comp_ring;
893 struct vmxnet3_data_ring *data_ring;
896 PMD_INIT_FUNC_TRACE();
898 if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOXSUMSCTP) !=
899 ETH_TXQ_FLAGS_NOXSUMSCTP) {
900 PMD_INIT_LOG(ERR, "SCTP checksum offload not supported");
904 txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
905 RTE_CACHE_LINE_SIZE);
907 PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
911 txq->queue_id = queue_idx;
912 txq->port_id = dev->data->port_id;
913 txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
915 txq->qid = queue_idx;
917 txq->txdata_desc_size = hw->txdata_desc_size;
919 ring = &txq->cmd_ring;
920 comp_ring = &txq->comp_ring;
921 data_ring = &txq->data_ring;
923 /* Tx vmxnet ring length should be between 512 and 4096 */
924 if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
925 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u",
926 VMXNET3_DEF_TX_RING_SIZE);
928 } else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
929 PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u",
930 VMXNET3_TX_RING_MAX_SIZE);
933 ring->size = nb_desc;
934 ring->size &= ~VMXNET3_RING_SIZE_MASK;
936 comp_ring->size = data_ring->size = ring->size;
938 /* Tx vmxnet rings structure initialization */
941 ring->gen = VMXNET3_INIT_GEN;
942 comp_ring->next2proc = 0;
943 comp_ring->gen = VMXNET3_INIT_GEN;
945 size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
946 size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
947 size += txq->txdata_desc_size * data_ring->size;
949 mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
950 VMXNET3_RING_BA_ALIGN, socket_id);
952 PMD_INIT_LOG(ERR, "Failed to reserve the Tx queue descriptors memzone");
956 memset(mz->addr, 0, mz->len);
958 /* cmd_ring initialization */
959 ring->base = mz->addr;
960 ring->basePA = mz->iova;
962 /* comp_ring initialization */
963 comp_ring->base = ring->base + ring->size;
964 comp_ring->basePA = ring->basePA +
965 (sizeof(struct Vmxnet3_TxDesc) * ring->size);
967 /* data_ring initialization */
968 data_ring->base = (Vmxnet3_TxDataDesc *)(comp_ring->base + comp_ring->size);
969 data_ring->basePA = comp_ring->basePA +
970 (sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size);
972 /* cmd_ring0 buf_info allocation */
973 ring->buf_info = rte_zmalloc("tx_ring_buf_info",
974 ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
975 if (ring->buf_info == NULL) {
976 PMD_INIT_LOG(ERR, "Failed to allocate the tx_ring buf_info array");
980 /* Update the data portion with txq */
981 dev->data->tx_queues[queue_idx] = txq;
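/* Set up an Rx queue: both command rings, the completion ring and, when the
 * device supports it, the Rx data ring share one DMA memzone; each command
 * ring gets its own buf_info array.
 */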
987 vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
990 unsigned int socket_id,
991 __rte_unused const struct rte_eth_rxconf *rx_conf,
992 struct rte_mempool *mp)
994 const struct rte_memzone *mz;
995 struct vmxnet3_rx_queue *rxq;
996 struct vmxnet3_hw *hw = dev->data->dev_private;
997 struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
998 struct vmxnet3_comp_ring *comp_ring;
999 struct vmxnet3_rx_data_ring *data_ring;
1004 PMD_INIT_FUNC_TRACE();
1006 rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
1007 RTE_CACHE_LINE_SIZE);
1009 PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
1014 rxq->queue_id = queue_idx;
1015 rxq->port_id = dev->data->port_id;
1016 rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
1018 rxq->qid1 = queue_idx;
1019 rxq->qid2 = queue_idx + hw->num_rx_queues;
1020 rxq->data_ring_qid = queue_idx + 2 * hw->num_rx_queues;
1021 rxq->data_desc_size = hw->rxdata_desc_size;
1022 rxq->stopped = TRUE;
1024 ring0 = &rxq->cmd_ring[0];
1025 ring1 = &rxq->cmd_ring[1];
1026 comp_ring = &rxq->comp_ring;
1027 data_ring = &rxq->data_ring;
1029 /* Rx vmxnet ring lengths should be between 256 and 4096 */
1030 if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
1031 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256");
1033 } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
1034 PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096");
1037 ring0->size = nb_desc;
1038 ring0->size &= ~VMXNET3_RING_SIZE_MASK;
1039 ring1->size = ring0->size;
1042 comp_ring->size = ring0->size + ring1->size;
1043 data_ring->size = ring0->size;
1045 /* Rx vmxnet rings structure initialization */
1046 ring0->next2fill = 0;
1047 ring1->next2fill = 0;
1048 ring0->next2comp = 0;
1049 ring1->next2comp = 0;
1050 ring0->gen = VMXNET3_INIT_GEN;
1051 ring1->gen = VMXNET3_INIT_GEN;
1052 comp_ring->next2proc = 0;
1053 comp_ring->gen = VMXNET3_INIT_GEN;
1055 size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
1056 size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1057 if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size)
1058 size += rxq->data_desc_size * data_ring->size;
1060 mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
1061 VMXNET3_RING_BA_ALIGN, socket_id);
1063 PMD_INIT_LOG(ERR, "Failed to reserve the Rx queue descriptors memzone");
1067 memset(mz->addr, 0, mz->len);
1069 /* cmd_ring0 initialization */
1070 ring0->base = mz->addr;
1071 ring0->basePA = mz->iova;
1073 /* cmd_ring1 initialization */
1074 ring1->base = ring0->base + ring0->size;
1075 ring1->basePA = ring0->basePA + sizeof(struct Vmxnet3_RxDesc) * ring0->size;
1077 /* comp_ring initialization */
1078 comp_ring->base = ring1->base + ring1->size;
1079 comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
1082 /* data_ring initialization */
1083 if (VMXNET3_VERSION_GE_3(hw) && rxq->data_desc_size) {
1085 (uint8_t *)(comp_ring->base + comp_ring->size);
1086 data_ring->basePA = comp_ring->basePA +
1087 sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
1090 /* cmd_ring0-cmd_ring1 buf_info allocation */
1091 for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
1093 ring = &rxq->cmd_ring[i];
1095 snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
1097 ring->buf_info = rte_zmalloc(mem_name,
1098 ring->size * sizeof(vmxnet3_buf_info_t),
1099 RTE_CACHE_LINE_SIZE);
1100 if (ring->buf_info == NULL) {
1101 PMD_INIT_LOG(ERR, "Failed to allocate the rx_ring buf_info array");
1106 /* Update the data portion with rxq */
1107 dev->data->rx_queues[queue_idx] = rxq;
1113 * Initializes the Receive Unit.
1114 * Loads mbufs into the Rx queues in advance.
1117 vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
1119 struct vmxnet3_hw *hw = dev->data->dev_private;
1124 PMD_INIT_FUNC_TRACE();
1126 for (i = 0; i < hw->num_rx_queues; i++) {
1127 vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
1129 for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
1130 /* Post buffers until the ring is full */
1131 ret = vmxnet3_post_rx_bufs(rxq, j);
1134 "ERROR: Posting Rxq: %d buffers ring: %d",
1139 * Update the device with next2fill so it can use the
1140 * newly posted mbufs for incoming packets.
1142 if (unlikely(rxq->shared->ctrl.updateRxProd)) {
1143 VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
1144 rxq->cmd_ring[j].next2fill);
1147 rxq->stopped = FALSE;
1148 rxq->start_seg = NULL;
1151 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1152 struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
1154 txq->stopped = FALSE;
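/* Default 40-byte Toeplitz RSS key, used when the application does not
 * supply its own.
 */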
1160 static uint8_t rss_intel_key[40] = {
1161 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1162 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1163 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1164 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1165 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1169 * Configure RSS feature
1172 vmxnet3_rss_configure(struct rte_eth_dev *dev)
1174 struct vmxnet3_hw *hw = dev->data->dev_private;
1175 struct VMXNET3_RSSConf *dev_rss_conf;
1176 struct rte_eth_rss_conf *port_rss_conf;
1180 PMD_INIT_FUNC_TRACE();
1182 dev_rss_conf = hw->rss_conf;
1183 port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
1185 /* loading hashFunc */
1186 dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
1187 /* loading hashKeySize */
1188 dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
1189 /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
1190 dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
1192 if (port_rss_conf->rss_key == NULL) {
1193 /* Default hash key */
1194 port_rss_conf->rss_key = rss_intel_key;
1197 /* loading hashKey */
1198 memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
1199 dev_rss_conf->hashKeySize);
1201 /* loading indTable */
1202 for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
1203 if (j == dev->data->nb_rx_queues)
1205 dev_rss_conf->indTable[i] = j;
1208 /* loading hashType */
1209 dev_rss_conf->hashType = 0;
1210 rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
1211 if (rss_hf & ETH_RSS_IPV4)
1212 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
1213 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1214 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
1215 if (rss_hf & ETH_RSS_IPV6)
1216 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
1217 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1218 dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
1220 return VMXNET3_SUCCESS;