1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
41 #include <rte_string_fns.h>
43 #include "e1000_logs.h"
44 #include "base/e1000_api.h"
45 #include "e1000_ethdev.h"
46 #include "base/e1000_osdep.h"
48 #define E1000_TXD_VLAN_SHIFT 16
50 #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
52 #define E1000_TX_OFFLOAD_MASK ( \
57 #define E1000_TX_OFFLOAD_NOTSUP_MASK \
58 (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
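/*
 * Note: since every flag in E1000_TX_OFFLOAD_MASK is also part of
 * PKT_TX_OFFLOAD_MASK, the XOR above leaves exactly the mbuf ol_flags
 * bits this PMD cannot offload; eth_em_prep_pkts() rejects any packet
 * carrying one of them.
 */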
61 * Structure associated with each descriptor of the RX ring of a RX queue.
64 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
68 * Structure associated with each descriptor of the TX ring of a TX queue.
71 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
72 uint16_t next_id; /**< Index of next descriptor in ring. */
73 uint16_t last_id; /**< Index of last scattered descriptor. */
77 * Structure associated with each RX queue.
80 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
81 volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
82 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
83 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
84 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
85 struct em_rx_entry *sw_ring; /**< address of RX software ring. */
86 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
87 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
88 uint16_t nb_rx_desc; /**< number of RX descriptors. */
89 uint16_t rx_tail; /**< current value of RDT register. */
90 uint16_t nb_rx_hold; /**< number of held free RX desc. */
91 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
92 uint16_t queue_id; /**< RX queue index. */
93 uint16_t port_id; /**< Device port identifier. */
94 uint8_t pthresh; /**< Prefetch threshold register. */
95 uint8_t hthresh; /**< Host threshold register. */
96 uint8_t wthresh; /**< Write-back threshold register. */
97 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
101 * Hardware context number
104 EM_CTX_0 = 0, /**< CTX0 */
105 EM_CTX_NUM = 1, /**< CTX NUM */
108 /** Offload features */
109 union em_vlan_macip {
112 uint16_t l3_len:9; /**< L3 (IP) Header Length. */
113 uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
115 /**< VLAN Tag Control Identifier (CPU order). */
120 * Compare mask for vlan_macip_len.data,
121 * should be in sync with em_vlan_macip.f layout.
123 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
124 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) header length - 7 bits. */
125 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) header length - 9 bits. */
126 /** MAC+IP length. */
127 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
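/*
 * These masks select which bits of em_vlan_macip.data must match for a
 * cached hardware context to be reused: what_ctx_update() XORs the cached
 * value with the new one and ANDs the result with ctx_cache.cmp_mask, so
 * only the fields relevant to the requested offloads can force a new
 * context descriptor.
 */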
130 * Structure used to check whether a new context descriptor needs to be built.
133 uint64_t flags; /**< ol_flags related to context build. */
134 uint32_t cmp_mask; /**< compare mask */
135 union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
139 * Structure associated with each TX queue.
142 volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
143 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
144 struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
145 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
146 uint16_t nb_tx_desc; /**< number of TX descriptors. */
147 uint16_t tx_tail; /**< Current value of TDT register. */
148 /**< Start freeing TX buffers if there are fewer free descriptors than this value. */
150 uint16_t tx_free_thresh;
151 /**< Number of TX descriptors to use before RS bit is set. */
152 uint16_t tx_rs_thresh;
153 /** Number of TX descriptors used since RS bit was set. */
155 /** Index to last TX descriptor to have been cleaned. */
156 uint16_t last_desc_cleaned;
157 /** Total number of TX descriptors ready to be allocated. */
159 uint16_t queue_id; /**< TX queue index. */
160 uint16_t port_id; /**< Device port identifier. */
161 uint8_t pthresh; /**< Prefetch threshold register. */
162 uint8_t hthresh; /**< Host threshold register. */
163 uint8_t wthresh; /**< Write-back threshold register. */
164 struct em_ctx_info ctx_cache;
165 /**< Hardware context history.*/
169 #define RTE_PMD_USE_PREFETCH
172 #ifdef RTE_PMD_USE_PREFETCH
173 #define rte_em_prefetch(p) rte_prefetch0(p)
175 #define rte_em_prefetch(p) do {} while(0)
178 #ifdef RTE_PMD_PACKET_PREFETCH
179 #define rte_packet_prefetch(p) rte_prefetch1(p)
181 #define rte_packet_prefetch(p) do {} while(0)
184 #ifndef DEFAULT_TX_FREE_THRESH
185 #define DEFAULT_TX_FREE_THRESH 32
186 #endif /* DEFAULT_TX_FREE_THRESH */
188 #ifndef DEFAULT_TX_RS_THRESH
189 #define DEFAULT_TX_RS_THRESH 32
190 #endif /* DEFAULT_TX_RS_THRESH */
193 /*********************************************************************
197 **********************************************************************/
200 * Populates TX context descriptor.
203 em_set_xmit_ctx(struct em_tx_queue* txq,
204 volatile struct e1000_context_desc *ctx_txd,
206 union em_vlan_macip hdrlen)
208 uint32_t cmp_mask, cmd_len;
209 uint16_t ipcse, l2len;
210 struct e1000_context_desc ctx;
213 cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
215 l2len = hdrlen.f.l2_len;
216 ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
218 /* setup IPCS* fields */
219 ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
220 ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
221 offsetof(struct ipv4_hdr, hdr_checksum));
224 * When doing checksum or TCP segmentation with IPv6 headers,
225 * the IPCSE field should be set to 0.
227 if (flags & PKT_TX_IP_CKSUM) {
228 ctx.lower_setup.ip_fields.ipcse =
229 (uint16_t)rte_cpu_to_le_16(ipcse - 1);
230 cmd_len |= E1000_TXD_CMD_IP;
231 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
233 ctx.lower_setup.ip_fields.ipcse = 0;
236 /* setup TUCS* fields */
237 ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
238 ctx.upper_setup.tcp_fields.tucse = 0;
240 switch (flags & PKT_TX_L4_MASK) {
241 case PKT_TX_UDP_CKSUM:
242 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
243 offsetof(struct udp_hdr, dgram_cksum));
244 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
246 case PKT_TX_TCP_CKSUM:
247 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
248 offsetof(struct tcp_hdr, cksum));
249 cmd_len |= E1000_TXD_CMD_TCP;
250 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
253 ctx.upper_setup.tcp_fields.tucso = 0;
256 ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
257 ctx.tcp_seg_setup.data = 0;
261 txq->ctx_cache.flags = flags;
262 txq->ctx_cache.cmp_mask = cmp_mask;
263 txq->ctx_cache.hdrlen = hdrlen;
267 * Check which hardware context can be used. Use the existing match
268 * or create a new context descriptor.
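 * Returns EM_CTX_0 when the cached context matches and can be reused, or
 * EM_CTX_NUM when a new context descriptor has to be written (the caller
 * tests "ctx == EM_CTX_NUM" to decide).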
270 static inline uint32_t
271 what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
272 union em_vlan_macip hdrlen)
274 /* If match with the current context */
275 if (likely (txq->ctx_cache.flags == flags &&
276 ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
277 txq->ctx_cache.cmp_mask) == 0))
284 /* Reset transmit descriptors after they have been used */
286 em_xmit_cleanup(struct em_tx_queue *txq)
288 struct em_tx_entry *sw_ring = txq->sw_ring;
289 volatile struct e1000_data_desc *txr = txq->tx_ring;
290 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
291 uint16_t nb_tx_desc = txq->nb_tx_desc;
292 uint16_t desc_to_clean_to;
293 uint16_t nb_tx_to_clean;
295 /* Determine the last descriptor needing to be cleaned */
296 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
297 if (desc_to_clean_to >= nb_tx_desc)
298 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
300 /* Check to make sure the last descriptor to clean is done */
301 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
302 if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
304 PMD_TX_FREE_LOG(DEBUG,
305 "TX descriptor %4u is not done"
306 "(port=%d queue=%d)", desc_to_clean_to,
307 txq->port_id, txq->queue_id);
308 /* Failed to clean any descriptors, better luck next time */
312 /* Figure out how many descriptors will be cleaned */
313 if (last_desc_cleaned > desc_to_clean_to)
314 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
317 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
320 PMD_TX_FREE_LOG(DEBUG,
321 "Cleaning %4u TX descriptors: %4u to %4u "
322 "(port=%d queue=%d)", nb_tx_to_clean,
323 last_desc_cleaned, desc_to_clean_to, txq->port_id,
327 * The last descriptor to clean is done, so that means all the
328 * descriptors from the last descriptor that was cleaned
329 * up to the last descriptor with the RS bit set
330 * are done. Only reset the threshold descriptor.
332 txr[desc_to_clean_to].upper.fields.status = 0;
334 /* Update the txq to reflect the last descriptor that was cleaned */
335 txq->last_desc_cleaned = desc_to_clean_to;
336 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
342 static inline uint32_t
343 tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
345 static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
346 static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
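/*
 * Branch-free translation of ol_flags into the POPTS byte of the
 * descriptor's upper word: each boolean comparison below yields 0 or 1 and
 * indexes a two-entry table, selecting either 0 or the TXSM/IXSM bit
 * already shifted into position (bits 8-15 of upper.data).
 */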
349 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
350 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
355 eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
358 struct em_tx_queue *txq;
359 struct em_tx_entry *sw_ring;
360 struct em_tx_entry *txe, *txn;
361 volatile struct e1000_data_desc *txr;
362 volatile struct e1000_data_desc *txd;
363 struct rte_mbuf *tx_pkt;
364 struct rte_mbuf *m_seg;
365 uint64_t buf_dma_addr;
367 uint32_t cmd_type_len;
377 union em_vlan_macip hdrlen;
380 sw_ring = txq->sw_ring;
382 tx_id = txq->tx_tail;
383 txe = &sw_ring[tx_id];
385 /* Determine if the descriptor ring needs to be cleaned. */
386 if (txq->nb_tx_free < txq->tx_free_thresh)
387 em_xmit_cleanup(txq);
390 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
394 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
397 * Determine how many (if any) context descriptors
398 * are needed for offload functionality.
400 ol_flags = tx_pkt->ol_flags;
402 /* If hardware offload required */
403 tx_ol_req = (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK));
405 hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
406 hdrlen.f.l2_len = tx_pkt->l2_len;
407 hdrlen.f.l3_len = tx_pkt->l3_len;
408 /* Decide whether a new context descriptor must be built or the existing one reused. */
409 ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
411 /* Only allocate a context descriptor if required */
412 new_ctx = (ctx == EM_CTX_NUM);
416 * Keep track of how many descriptors are used this loop iteration.
417 * This will always be the number of segments plus the number of
418 * context descriptors required to transmit the packet.
420 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
423 * The number of descriptors that must be allocated for a
424 * packet is the number of segments of that packet, plus 1
425 * Context Descriptor for the hardware offload, if any.
426 * Determine the last TX descriptor to allocate in the TX ring
427 * for the packet, starting from the current position (tx_id)
430 tx_last = (uint16_t) (tx_id + nb_used - 1);
433 if (tx_last >= txq->nb_tx_desc)
434 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
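/*
 * Example of the wrap-around above: with a 512-entry ring, tx_id = 510 and
 * nb_used = 4 give tx_last = 513, which wraps to descriptor 1.
 */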
436 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
437 " tx_first=%u tx_last=%u",
438 (unsigned) txq->port_id,
439 (unsigned) txq->queue_id,
440 (unsigned) tx_pkt->pkt_len,
445 * Make sure there are enough TX descriptors available to
446 * transmit the entire packet.
447 * nb_used better be less than or equal to txq->tx_rs_thresh
449 while (unlikely (nb_used > txq->nb_tx_free)) {
450 PMD_TX_FREE_LOG(DEBUG, "Not enough free TX descriptors "
451 "nb_used=%4u nb_free=%4u "
452 "(port=%d queue=%d)",
453 nb_used, txq->nb_tx_free,
454 txq->port_id, txq->queue_id);
456 if (em_xmit_cleanup(txq) != 0) {
457 /* Could not clean any descriptors */
465 * By now there are enough free TX descriptors to transmit the packet.
470 * Set common flags of all TX Data Descriptors.
472 * The following bits must be set in all Data Descriptors:
473 * - E1000_TXD_DTYP_DATA
474 * - E1000_TXD_DTYP_DEXT
476 * The following bits must be set in the first Data Descriptor
477 * and are ignored in the other ones:
478 * - E1000_TXD_POPTS_IXSM
479 * - E1000_TXD_POPTS_TXSM
481 * The following bits must be set in the last Data Descriptor
482 * and are ignored in the other ones:
483 * - E1000_TXD_CMD_VLE
484 * - E1000_TXD_CMD_IFCS
486 * The following bits must only be set in the last Data Descriptor:
488 * - E1000_TXD_CMD_EOP
490 * The following bits can be set in any Data Descriptor, but
491 * are only set in the last Data Descriptor:
494 cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
498 /* Set VLAN Tag offload fields. */
499 if (ol_flags & PKT_TX_VLAN_PKT) {
500 cmd_type_len |= E1000_TXD_CMD_VLE;
501 popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
506 * Setup the TX Context Descriptor if required
509 volatile struct e1000_context_desc *ctx_txd;
511 ctx_txd = (volatile struct e1000_context_desc *)
514 txn = &sw_ring[txe->next_id];
515 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
517 if (txe->mbuf != NULL) {
518 rte_pktmbuf_free_seg(txe->mbuf);
522 em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
525 txe->last_id = tx_last;
526 tx_id = txe->next_id;
531 * Setup the TX Data Descriptor.
532 * This path is taken whether a new context descriptor
533 * was built above or the cached one was reused.
535 popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
541 txn = &sw_ring[txe->next_id];
543 if (txe->mbuf != NULL)
544 rte_pktmbuf_free_seg(txe->mbuf);
548 * Set up Transmit Data Descriptor.
550 slen = m_seg->data_len;
551 buf_dma_addr = rte_mbuf_data_iova(m_seg);
553 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
554 txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
555 txd->upper.data = rte_cpu_to_le_32(popts_spec);
557 txe->last_id = tx_last;
558 tx_id = txe->next_id;
561 } while (m_seg != NULL);
564 * The last packet data descriptor needs End Of Packet (EOP)
566 cmd_type_len |= E1000_TXD_CMD_EOP;
567 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
568 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
570 /* Set RS bit only on threshold packets' last descriptor */
571 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
572 PMD_TX_FREE_LOG(DEBUG,
573 "Setting RS bit on TXD id=%4u "
574 "(port=%d queue=%d)",
575 tx_last, txq->port_id, txq->queue_id);
577 cmd_type_len |= E1000_TXD_CMD_RS;
579 /* Update txq RS bit counters */
582 txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
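/*
 * The RS (Report Status) bit is requested at most once every tx_rs_thresh
 * descriptors, on the last descriptor of the packet that crosses the
 * threshold; this batches descriptor write-backs and keeps DMA traffic low,
 * at the cost of freeing transmitted mbufs in bursts from em_xmit_cleanup().
 */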
588 * Set the Transmit Descriptor Tail (TDT)
590 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
591 (unsigned) txq->port_id, (unsigned) txq->queue_id,
592 (unsigned) tx_id, (unsigned) nb_tx);
593 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
594 txq->tx_tail = tx_id;
599 /*********************************************************************
603 **********************************************************************/
605 eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
611 for (i = 0; i < nb_pkts; i++) {
614 if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
615 rte_errno = ENOTSUP; /* rte_errno takes positive errno values */
619 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
620 ret = rte_validate_tx_offload(m);
626 ret = rte_net_intel_cksum_prepare(m);
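/*
 * The call above writes the pseudo-header checksum into the L4 header,
 * which the e1000 TX checksum offload expects to find there before it
 * fills in the remainder of the checksum.
 */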
636 /*********************************************************************
640 **********************************************************************/
642 static inline uint64_t
643 rx_desc_status_to_pkt_flags(uint32_t rx_status)
647 /* Check if VLAN present */
648 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
649 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
654 static inline uint64_t
655 rx_desc_error_to_pkt_flags(uint32_t rx_error)
657 uint64_t pkt_flags = 0;
659 if (rx_error & E1000_RXD_ERR_IPE)
660 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
661 if (rx_error & E1000_RXD_ERR_TCPE)
662 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
667 eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
670 volatile struct e1000_rx_desc *rx_ring;
671 volatile struct e1000_rx_desc *rxdp;
672 struct em_rx_queue *rxq;
673 struct em_rx_entry *sw_ring;
674 struct em_rx_entry *rxe;
675 struct rte_mbuf *rxm;
676 struct rte_mbuf *nmb;
677 struct e1000_rx_desc rxd;
689 rx_id = rxq->rx_tail;
690 rx_ring = rxq->rx_ring;
691 sw_ring = rxq->sw_ring;
692 while (nb_rx < nb_pkts) {
694 * The order of operations here is important as the DD status
695 * bit must not be read after any other descriptor fields.
696 * rx_ring and rxdp are pointing to volatile data so the order
697 * of accesses cannot be reordered by the compiler. If they were
698 * not volatile, they could be reordered which could lead to
699 * using invalid descriptor fields when read from rxd.
701 rxdp = &rx_ring[rx_id];
702 status = rxdp->status;
703 if (! (status & E1000_RXD_STAT_DD))
710 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
711 * likely to be invalid and to be dropped by the various
712 * validation checks performed by the network stack.
714 * Allocate a new mbuf to replenish the RX ring descriptor.
715 * If the allocation fails:
716 * - arrange for that RX descriptor to be the first one
717 * being parsed the next time the receive function is
718 * invoked [on the same queue].
720 * - Stop parsing the RX ring and return immediately.
722 * This policy does not drop the packet received in the RX
723 * descriptor for which the allocation of a new mbuf failed.
724 * Thus, it allows that packet to be later retrieved if
725 * mbufs have been freed in the meantime.
726 * As a side effect, holding RX descriptors instead of
727 * systematically giving them back to the NIC may lead to
728 * RX ring exhaustion situations.
729 * However, the NIC can gracefully prevent such situations
730 * from happening by sending specific "back-pressure" flow control
731 * frames to its peer(s).
733 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
734 "status=0x%x pkt_len=%u",
735 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
736 (unsigned) rx_id, (unsigned) status,
737 (unsigned) rte_le_to_cpu_16(rxd.length));
739 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
741 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
743 (unsigned) rxq->port_id,
744 (unsigned) rxq->queue_id);
745 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
750 rxe = &sw_ring[rx_id];
752 if (rx_id == rxq->nb_rx_desc)
755 /* Prefetch next mbuf while processing current one. */
756 rte_em_prefetch(sw_ring[rx_id].mbuf);
759 * When the next RX descriptor is on a cache-line boundary,
760 * prefetch the next 4 RX descriptors and the next 8 pointers to mbufs.
763 if ((rx_id & 0x3) == 0) {
764 rte_em_prefetch(&rx_ring[rx_id]);
765 rte_em_prefetch(&sw_ring[rx_id]);
768 /* Rearm RXD: attach new mbuf and reset status to zero. */
773 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
774 rxdp->buffer_addr = dma_addr;
778 * Initialize the returned mbuf.
779 * 1) setup generic mbuf fields:
780 * - number of segments,
783 * - RX port identifier.
784 * 2) integrate hardware offload data, if any:
786 * - IP checksum flag,
787 * - VLAN TCI, if any,
790 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
792 rxm->data_off = RTE_PKTMBUF_HEADROOM;
793 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
796 rxm->pkt_len = pkt_len;
797 rxm->data_len = pkt_len;
798 rxm->port = rxq->port_id;
800 rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
801 rxm->ol_flags = rxm->ol_flags |
802 rx_desc_error_to_pkt_flags(rxd.errors);
804 /* Only valid if PKT_RX_VLAN set in pkt_flags */
805 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
808 * Store the mbuf address into the next entry of the array
809 * of returned packets.
811 rx_pkts[nb_rx++] = rxm;
813 rxq->rx_tail = rx_id;
816 * If the number of free RX descriptors is greater than the RX free
817 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
819 * Update the RDT with the value of the last processed RX descriptor
820 * minus 1, to guarantee that the RDT register is never equal to the
821 * RDH register, which creates a "full" ring situation from the
822 * hardware point of view...
824 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
825 if (nb_hold > rxq->rx_free_thresh) {
826 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
827 "nb_hold=%u nb_rx=%u",
828 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
829 (unsigned) rx_id, (unsigned) nb_hold,
831 rx_id = (uint16_t) ((rx_id == 0) ?
832 (rxq->nb_rx_desc - 1) : (rx_id - 1));
833 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
836 rxq->nb_rx_hold = nb_hold;
841 eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
844 struct em_rx_queue *rxq;
845 volatile struct e1000_rx_desc *rx_ring;
846 volatile struct e1000_rx_desc *rxdp;
847 struct em_rx_entry *sw_ring;
848 struct em_rx_entry *rxe;
849 struct rte_mbuf *first_seg;
850 struct rte_mbuf *last_seg;
851 struct rte_mbuf *rxm;
852 struct rte_mbuf *nmb;
853 struct e1000_rx_desc rxd;
854 uint64_t dma; /* Physical address of mbuf data buffer */
865 rx_id = rxq->rx_tail;
866 rx_ring = rxq->rx_ring;
867 sw_ring = rxq->sw_ring;
870 * Retrieve RX context of current packet, if any.
872 first_seg = rxq->pkt_first_seg;
873 last_seg = rxq->pkt_last_seg;
875 while (nb_rx < nb_pkts) {
878 * The order of operations here is important as the DD status
879 * bit must not be read after any other descriptor fields.
880 * rx_ring and rxdp are pointing to volatile data so the order
881 * of accesses cannot be reordered by the compiler. If they were
882 * not volatile, they could be reordered which could lead to
883 * using invalid descriptor fields when read from rxd.
885 rxdp = &rx_ring[rx_id];
886 status = rxdp->status;
887 if (! (status & E1000_RXD_STAT_DD))
894 * Allocate a new mbuf to replenish the RX ring descriptor.
895 * If the allocation fails:
896 * - arrange for that RX descriptor to be the first one
897 * being parsed the next time the receive function is
898 * invoked [on the same queue].
900 * - Stop parsing the RX ring and return immediately.
902 * This policy does not drop the packet received in the RX
903 * descriptor for which the allocation of a new mbuf failed.
904 * Thus, it allows that packet to be later retrieved if
905 * mbufs have been freed in the meantime.
906 * As a side effect, holding RX descriptors instead of
907 * systematically giving them back to the NIC may lead to
908 * RX ring exhaustion situations.
909 * However, the NIC can gracefully prevent such situations
910 * from happening by sending specific "back-pressure" flow control
911 * frames to its peer(s).
913 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
914 "status=0x%x data_len=%u",
915 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
916 (unsigned) rx_id, (unsigned) status,
917 (unsigned) rte_le_to_cpu_16(rxd.length));
919 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
921 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
922 "queue_id=%u", (unsigned) rxq->port_id,
923 (unsigned) rxq->queue_id);
924 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
929 rxe = &sw_ring[rx_id];
931 if (rx_id == rxq->nb_rx_desc)
934 /* Prefetch next mbuf while processing current one. */
935 rte_em_prefetch(sw_ring[rx_id].mbuf);
938 * When the next RX descriptor is on a cache-line boundary,
939 * prefetch the next 4 RX descriptors and the next 8 pointers to mbufs.
942 if ((rx_id & 0x3) == 0) {
943 rte_em_prefetch(&rx_ring[rx_id]);
944 rte_em_prefetch(&sw_ring[rx_id]);
948 * Update RX descriptor with the physical address of the new
949 * data buffer of the new allocated mbuf.
953 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
954 rxdp->buffer_addr = dma;
958 * Set data length & data buffer address of mbuf.
960 data_len = rte_le_to_cpu_16(rxd.length);
961 rxm->data_len = data_len;
962 rxm->data_off = RTE_PKTMBUF_HEADROOM;
965 * If this is the first buffer of the received packet,
966 * set the pointer to the first mbuf of the packet and
967 * initialize its context.
968 * Otherwise, update the total length and the number of segments
969 * of the current scattered packet, and update the pointer to
970 * the last mbuf of the current packet.
972 if (first_seg == NULL) {
974 first_seg->pkt_len = data_len;
975 first_seg->nb_segs = 1;
977 first_seg->pkt_len += data_len;
978 first_seg->nb_segs++;
979 last_seg->next = rxm;
983 * If this is not the last buffer of the received packet,
984 * update the pointer to the last mbuf of the current scattered
985 * packet and continue to parse the RX ring.
987 if (! (status & E1000_RXD_STAT_EOP)) {
993 * This is the last buffer of the received packet.
994 * If the CRC is not stripped by the hardware:
995 * - Subtract the CRC length from the total packet length.
996 * - If the last buffer only contains the whole CRC or a part
997 * of it, free the mbuf associated to the last buffer.
998 * If part of the CRC is also contained in the previous
999 * mbuf, subtract the length of that CRC part from the
1000 * data length of the previous mbuf.
1003 if (unlikely(rxq->crc_len > 0)) {
1004 first_seg->pkt_len -= ETHER_CRC_LEN;
1005 if (data_len <= ETHER_CRC_LEN) {
1006 rte_pktmbuf_free_seg(rxm);
1007 first_seg->nb_segs--;
1008 last_seg->data_len = (uint16_t)
1009 (last_seg->data_len -
1010 (ETHER_CRC_LEN - data_len));
1011 last_seg->next = NULL;
1014 (uint16_t) (data_len - ETHER_CRC_LEN);
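/*
 * Illustration of the CRC trimming above: with ETHER_CRC_LEN = 4, if the
 * last segment holds only 2 bytes (data_len = 2), it carries nothing but
 * part of the CRC, so that segment is freed and the previous segment is
 * shortened by the remaining 2 CRC bytes.
 */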
1018 * Initialize the first mbuf of the returned packet:
1019 * - RX port identifier,
1020 * - hardware offload data, if any:
1021 * - IP checksum flag,
1024 first_seg->port = rxq->port_id;
1026 first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
1027 first_seg->ol_flags = first_seg->ol_flags |
1028 rx_desc_error_to_pkt_flags(rxd.errors);
1030 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1031 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
1033 /* Prefetch data of first segment, if configured to do so. */
1034 rte_packet_prefetch((char *)first_seg->buf_addr +
1035 first_seg->data_off);
1038 * Store the mbuf address into the next entry of the array
1039 * of returned packets.
1041 rx_pkts[nb_rx++] = first_seg;
1044 * Setup receipt context for a new packet.
1050 * Record index of the next RX descriptor to probe.
1052 rxq->rx_tail = rx_id;
1055 * Save receive context.
1057 rxq->pkt_first_seg = first_seg;
1058 rxq->pkt_last_seg = last_seg;
1061 * If the number of free RX descriptors is greater than the RX free
1062 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1064 * Update the RDT with the value of the last processed RX descriptor
1065 * minus 1, to guarantee that the RDT register is never equal to the
1066 * RDH register, which creates a "full" ring situation from the
1067 * hardware point of view...
1069 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1070 if (nb_hold > rxq->rx_free_thresh) {
1071 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1072 "nb_hold=%u nb_rx=%u",
1073 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1074 (unsigned) rx_id, (unsigned) nb_hold,
1076 rx_id = (uint16_t) ((rx_id == 0) ?
1077 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1078 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1081 rxq->nb_rx_hold = nb_hold;
1085 #define EM_MAX_BUF_SIZE 16384
1086 #define EM_RCTL_FLXBUF_STEP 1024
1089 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
1093 if (txq->sw_ring != NULL) {
1094 for (i = 0; i != txq->nb_tx_desc; i++) {
1095 if (txq->sw_ring[i].mbuf != NULL) {
1096 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1097 txq->sw_ring[i].mbuf = NULL;
1104 em_tx_queue_release(struct em_tx_queue *txq)
1107 em_tx_queue_release_mbufs(txq);
1108 rte_free(txq->sw_ring);
1114 eth_em_tx_queue_release(void *txq)
1116 em_tx_queue_release(txq);
1119 /* (Re)set dynamic em_tx_queue fields to defaults */
1121 em_reset_tx_queue(struct em_tx_queue *txq)
1123 uint16_t i, nb_desc, prev;
1124 static const struct e1000_data_desc txd_init = {
1125 .upper.fields = {.status = E1000_TXD_STAT_DD},
1128 nb_desc = txq->nb_tx_desc;
1130 /* Initialize ring entries */
1132 prev = (uint16_t) (nb_desc - 1);
1134 for (i = 0; i < nb_desc; i++) {
1135 txq->tx_ring[i] = txd_init;
1136 txq->sw_ring[i].mbuf = NULL;
1137 txq->sw_ring[i].last_id = i;
1138 txq->sw_ring[prev].next_id = i;
1143 * Always allow 1 descriptor to be un-allocated to avoid
1144 * a H/W race condition
1146 txq->nb_tx_free = (uint16_t)(nb_desc - 1);
1147 txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
1148 txq->nb_tx_used = 0;
1151 memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
1155 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
1158 unsigned int socket_id,
1159 const struct rte_eth_txconf *tx_conf)
1161 const struct rte_memzone *tz;
1162 struct em_tx_queue *txq;
1163 struct e1000_hw *hw;
1165 uint16_t tx_rs_thresh, tx_free_thresh;
1167 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1170 * Validate number of transmit descriptors.
1171 * It must not exceed the hardware maximum and must be a multiple of EM_TXD_ALIGN.
1174 if (nb_desc % EM_TXD_ALIGN != 0 ||
1175 (nb_desc > E1000_MAX_RING_DESC) ||
1176 (nb_desc < E1000_MIN_RING_DESC)) {
1180 tx_free_thresh = tx_conf->tx_free_thresh;
1181 if (tx_free_thresh == 0)
1182 tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
1183 DEFAULT_TX_FREE_THRESH);
1185 tx_rs_thresh = tx_conf->tx_rs_thresh;
1186 if (tx_rs_thresh == 0)
1187 tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
1188 DEFAULT_TX_RS_THRESH);
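	/*
	 * Example with the defaults: for a 512-descriptor ring and zeroed
	 * tx_conf thresholds, tx_free_thresh = min(512/4, 32) = 32 and
	 * tx_rs_thresh = min(32, 32) = 32, so cleanup and RS batching both
	 * happen in groups of 32 descriptors.
	 */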
1190 if (tx_free_thresh >= (nb_desc - 3)) {
1191 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1192 "number of TX descriptors minus 3. "
1193 "(tx_free_thresh=%u port=%d queue=%d)",
1194 (unsigned int)tx_free_thresh,
1195 (int)dev->data->port_id, (int)queue_idx);
1198 if (tx_rs_thresh > tx_free_thresh) {
1199 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
1200 "tx_free_thresh. (tx_free_thresh=%u "
1201 "tx_rs_thresh=%u port=%d queue=%d)",
1202 (unsigned int)tx_free_thresh,
1203 (unsigned int)tx_rs_thresh,
1204 (int)dev->data->port_id,
1210 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
1211 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
1212 * by the NIC and all descriptors are written back after the NIC
1213 * accumulates WTHRESH descriptors.
1215 if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
1216 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1217 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
1218 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
1219 (int)dev->data->port_id, (int)queue_idx);
1223 /* Free memory prior to re-allocation if needed... */
1224 if (dev->data->tx_queues[queue_idx] != NULL) {
1225 em_tx_queue_release(dev->data->tx_queues[queue_idx]);
1226 dev->data->tx_queues[queue_idx] = NULL;
1230 * Allocate TX ring hardware descriptors. A memzone large enough to
1231 * handle the maximum ring size is allocated in order to allow for
1232 * resizing in later calls to the queue setup function.
1234 tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
1235 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
1236 RTE_CACHE_LINE_SIZE, socket_id);
1240 /* Allocate the tx queue data structure. */
1241 if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
1242 RTE_CACHE_LINE_SIZE)) == NULL)
1245 /* Allocate software ring */
1246 if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
1247 sizeof(txq->sw_ring[0]) * nb_desc,
1248 RTE_CACHE_LINE_SIZE)) == NULL) {
1249 em_tx_queue_release(txq);
1253 txq->nb_tx_desc = nb_desc;
1254 txq->tx_free_thresh = tx_free_thresh;
1255 txq->tx_rs_thresh = tx_rs_thresh;
1256 txq->pthresh = tx_conf->tx_thresh.pthresh;
1257 txq->hthresh = tx_conf->tx_thresh.hthresh;
1258 txq->wthresh = tx_conf->tx_thresh.wthresh;
1259 txq->queue_id = queue_idx;
1260 txq->port_id = dev->data->port_id;
1262 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1263 txq->tx_ring_phys_addr = tz->iova;
1264 txq->tx_ring = (struct e1000_data_desc *) tz->addr;
1266 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1267 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1269 em_reset_tx_queue(txq);
1271 dev->data->tx_queues[queue_idx] = txq;
1276 em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
1280 if (rxq->sw_ring != NULL) {
1281 for (i = 0; i != rxq->nb_rx_desc; i++) {
1282 if (rxq->sw_ring[i].mbuf != NULL) {
1283 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1284 rxq->sw_ring[i].mbuf = NULL;
1291 em_rx_queue_release(struct em_rx_queue *rxq)
1294 em_rx_queue_release_mbufs(rxq);
1295 rte_free(rxq->sw_ring);
1301 eth_em_rx_queue_release(void *rxq)
1303 em_rx_queue_release(rxq);
1306 /* Reset dynamic em_rx_queue fields back to defaults */
1308 em_reset_rx_queue(struct em_rx_queue *rxq)
1311 rxq->nb_rx_hold = 0;
1312 rxq->pkt_first_seg = NULL;
1313 rxq->pkt_last_seg = NULL;
1317 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
1320 unsigned int socket_id,
1321 const struct rte_eth_rxconf *rx_conf,
1322 struct rte_mempool *mp)
1324 const struct rte_memzone *rz;
1325 struct em_rx_queue *rxq;
1326 struct e1000_hw *hw;
1329 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1332 * Validate number of receive descriptors.
1333 * It must not exceed the hardware maximum and must be a multiple of EM_RXD_ALIGN.
1336 if (nb_desc % EM_RXD_ALIGN != 0 ||
1337 (nb_desc > E1000_MAX_RING_DESC) ||
1338 (nb_desc < E1000_MIN_RING_DESC)) {
1343 * EM devices don't support drop_en functionality
1345 if (rx_conf->rx_drop_en) {
1346 PMD_INIT_LOG(ERR, "drop_en functionality not supported by "
1351 /* Free memory prior to re-allocation if needed. */
1352 if (dev->data->rx_queues[queue_idx] != NULL) {
1353 em_rx_queue_release(dev->data->rx_queues[queue_idx]);
1354 dev->data->rx_queues[queue_idx] = NULL;
1357 /* Allocate RX ring for the maximum possible number of hardware descriptors. */
1358 rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
1359 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
1360 RTE_CACHE_LINE_SIZE, socket_id);
1364 /* Allocate the RX queue data structure. */
1365 if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
1366 RTE_CACHE_LINE_SIZE)) == NULL)
1369 /* Allocate software ring. */
1370 if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1371 sizeof (rxq->sw_ring[0]) * nb_desc,
1372 RTE_CACHE_LINE_SIZE)) == NULL) {
1373 em_rx_queue_release(rxq);
1378 rxq->nb_rx_desc = nb_desc;
1379 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1380 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1381 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1382 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1383 rxq->queue_id = queue_idx;
1384 rxq->port_id = dev->data->port_id;
1385 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
1388 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1389 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
1390 rxq->rx_ring_phys_addr = rz->iova;
1391 rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
1393 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1394 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1396 dev->data->rx_queues[queue_idx] = rxq;
1397 em_reset_rx_queue(rxq);
1403 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1405 #define EM_RXQ_SCAN_INTERVAL 4
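/*
 * Scan the ring for DD bits in steps of EM_RXQ_SCAN_INTERVAL descriptors,
 * so the count returned is an estimate rounded to that granularity rather
 * than an exact number of completed descriptors.
 */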
1406 volatile struct e1000_rx_desc *rxdp;
1407 struct em_rx_queue *rxq;
1410 rxq = dev->data->rx_queues[rx_queue_id];
1411 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1413 while ((desc < rxq->nb_rx_desc) &&
1414 (rxdp->status & E1000_RXD_STAT_DD)) {
1415 desc += EM_RXQ_SCAN_INTERVAL;
1416 rxdp += EM_RXQ_SCAN_INTERVAL;
1417 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1418 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1419 desc - rxq->nb_rx_desc]);
1426 eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
1428 volatile struct e1000_rx_desc *rxdp;
1429 struct em_rx_queue *rxq = rx_queue;
1432 if (unlikely(offset >= rxq->nb_rx_desc))
1434 desc = rxq->rx_tail + offset;
1435 if (desc >= rxq->nb_rx_desc)
1436 desc -= rxq->nb_rx_desc;
1438 rxdp = &rxq->rx_ring[desc];
1439 return !!(rxdp->status & E1000_RXD_STAT_DD);
1443 eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
1445 struct em_rx_queue *rxq = rx_queue;
1446 volatile uint8_t *status;
1449 if (unlikely(offset >= rxq->nb_rx_desc))
1452 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1453 return RTE_ETH_RX_DESC_UNAVAIL;
1455 desc = rxq->rx_tail + offset;
1456 if (desc >= rxq->nb_rx_desc)
1457 desc -= rxq->nb_rx_desc;
1459 status = &rxq->rx_ring[desc].status;
1460 if (*status & E1000_RXD_STAT_DD)
1461 return RTE_ETH_RX_DESC_DONE;
1463 return RTE_ETH_RX_DESC_AVAIL;
1467 eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
1469 struct em_tx_queue *txq = tx_queue;
1470 volatile uint8_t *status;
1473 if (unlikely(offset >= txq->nb_tx_desc))
1476 desc = txq->tx_tail + offset;
1477 /* go to next desc that has the RS bit */
1478 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1480 if (desc >= txq->nb_tx_desc) {
1481 desc -= txq->nb_tx_desc;
1482 if (desc >= txq->nb_tx_desc)
1483 desc -= txq->nb_tx_desc;
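	/*
	 * Only descriptors on which the RS bit was requested get their DD
	 * status written back, so the offset is rounded up above to the next
	 * tx_rs_thresh boundary before the status byte is sampled; e.g. with
	 * tx_rs_thresh = 32 and tx_tail sitting on a threshold boundary,
	 * offsets 1..32 all probe the same descriptor.
	 */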
1486 status = &txq->tx_ring[desc].upper.fields.status;
1487 if (*status & E1000_TXD_STAT_DD)
1488 return RTE_ETH_TX_DESC_DONE;
1490 return RTE_ETH_TX_DESC_FULL;
1494 em_dev_clear_queues(struct rte_eth_dev *dev)
1497 struct em_tx_queue *txq;
1498 struct em_rx_queue *rxq;
1500 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1501 txq = dev->data->tx_queues[i];
1503 em_tx_queue_release_mbufs(txq);
1504 em_reset_tx_queue(txq);
1508 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1509 rxq = dev->data->rx_queues[i];
1511 em_rx_queue_release_mbufs(rxq);
1512 em_reset_rx_queue(rxq);
1518 em_dev_free_queues(struct rte_eth_dev *dev)
1522 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1523 eth_em_rx_queue_release(dev->data->rx_queues[i]);
1524 dev->data->rx_queues[i] = NULL;
1526 dev->data->nb_rx_queues = 0;
1528 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1529 eth_em_tx_queue_release(dev->data->tx_queues[i]);
1530 dev->data->tx_queues[i] = NULL;
1532 dev->data->nb_tx_queues = 0;
1536 * Takes the RX buffer size as an input/output parameter.
1537 * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
1540 em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
1543 * For BSIZE & BSEX all configurable sizes are:
1544 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1545 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1546 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1547 * 2048: rctl |= E1000_RCTL_SZ_2048;
1548 * 1024: rctl |= E1000_RCTL_SZ_1024;
1549 * 512: rctl |= E1000_RCTL_SZ_512;
1550 * 256: rctl |= E1000_RCTL_SZ_256;
1552 static const struct {
1555 } bufsz_to_rctl[] = {
1556 {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
1557 {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
1558 {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
1559 {2048, E1000_RCTL_SZ_2048},
1560 {1024, E1000_RCTL_SZ_1024},
1561 {512, E1000_RCTL_SZ_512},
1562 {256, E1000_RCTL_SZ_256},
1566 uint32_t rctl_bsize;
1568 rctl_bsize = *bufsz;
1571 * Starting from 82571 it is possible to specify RX buffer size
1572 * by RCTL.FLXBUF. When this field is different from zero, the
1573 * RX buffer size = RCTL.FLXBUF * 1K
1574 * (e.g. it is possible to specify an RX buffer size of 1, 2, ..., 15 KB).
1575 * It works fine on real HW, but for some reason doesn't work
1576 * on the VMware-emulated 82574L.
1577 * So for now, always use BSIZE/BSEX to set up the RX buffer size.
1578 * If you don't plan to use it on the VMware-emulated 82574L and
1579 * would like to specify RX buffer size in 1K granularity,
1580 * uncomment the following lines:
1581 * ***************************************************************
1582 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
1583 * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
1584 * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
1585 * *bufsz = rctl_bsize;
1586 * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
1587 * E1000_RCTL_FLXBUF_MASK);
1589 * ***************************************************************
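/*
 * The loop below walks bufsz_to_rctl[] from the largest size down and picks
 * the first entry that does not exceed the requested size, reporting the
 * granted size back through *bufsz; e.g. a request for 3000 bytes is granted
 * 2048 and encoded as E1000_RCTL_SZ_2048.
 */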
1592 for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
1594 if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
1595 *bufsz = bufsz_to_rctl[i].bufsz;
1596 return bufsz_to_rctl[i].rctl;
1600 /* Should never happen. */
1605 em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
1607 struct em_rx_entry *rxe = rxq->sw_ring;
1610 static const struct e1000_rx_desc rxd_init = {
1614 /* Initialize software ring entries */
1615 for (i = 0; i < rxq->nb_rx_desc; i++) {
1616 volatile struct e1000_rx_desc *rxd;
1617 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1620 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1621 "queue_id=%hu", rxq->queue_id);
1626 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
1628 /* Clear HW ring memory */
1629 rxq->rx_ring[i] = rxd_init;
1631 rxd = &rxq->rx_ring[i];
1632 rxd->buffer_addr = dma_addr;
1639 /*********************************************************************
1641 * Enable receive unit.
1643 **********************************************************************/
1645 eth_em_rx_init(struct rte_eth_dev *dev)
1647 struct e1000_hw *hw;
1648 struct em_rx_queue *rxq;
1652 uint32_t rctl_bsize;
1656 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1659 * Make sure receives are disabled while setting
1660 * up the descriptor ring.
1662 rctl = E1000_READ_REG(hw, E1000_RCTL);
1663 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1665 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1667 /* Disable extended descriptor type. */
1668 rfctl &= ~E1000_RFCTL_EXTEN;
1669 /* Disable accelerated acknowledge */
1670 if (hw->mac.type == e1000_82574)
1671 rfctl |= E1000_RFCTL_ACK_DIS;
1673 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1676 * XXX TEMPORARY WORKAROUND: on some systems with 82573
1677 * long latencies are observed, like Lenovo X60. This
1678 * change eliminates the problem, but since having positive
1679 * values in RDTR is a known source of problems on other
1680 * platforms another solution is being sought.
1682 if (hw->mac.type == e1000_82573)
1683 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
1685 dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
1687 /* Determine RX bufsize. */
1688 rctl_bsize = EM_MAX_BUF_SIZE;
1689 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1692 rxq = dev->data->rx_queues[i];
1693 buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
1694 RTE_PKTMBUF_HEADROOM;
1695 rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
1698 rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
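	/*
	 * The loop above set rctl_bsize to the smallest mbuf data room (minus
	 * headroom) across the RX queues, and em_rctl_bsize() then rounded it
	 * down to a supported hardware size; e.g. 2048-byte buffers with the
	 * typical 128-byte RTE_PKTMBUF_HEADROOM leave 1920 bytes, granted as
	 * 1024, which in turn forces scattered RX for larger frames.
	 */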
1700 /* Configure and enable each RX queue. */
1701 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1705 rxq = dev->data->rx_queues[i];
1707 /* Allocate buffers for descriptor rings and setup queue */
1708 ret = em_alloc_rx_queue_mbufs(rxq);
1713 * Reset crc_len in case it was changed after queue setup by a
1717 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1720 bus_addr = rxq->rx_ring_phys_addr;
1721 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1723 sizeof(*rxq->rx_ring));
1724 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1725 (uint32_t)(bus_addr >> 32));
1726 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1728 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1729 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1731 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1732 rxdctl &= 0xFE000000;
1733 rxdctl |= rxq->pthresh & 0x3F;
1734 rxdctl |= (rxq->hthresh & 0x3F) << 8;
1735 rxdctl |= (rxq->wthresh & 0x3F) << 16;
1736 rxdctl |= E1000_RXDCTL_GRAN;
1737 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
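/*
 * RXDCTL layout used above: PTHRESH in bits 0-5, HTHRESH in bits 8-13 and
 * WTHRESH in bits 16-21, with E1000_RXDCTL_GRAN selecting descriptor
 * (rather than cache-line) granularity for those thresholds.
 */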
1740 * Due to EM devices not having any sort of hardware
1741 * limit for packet length, jumbo frames of any size
1742 * can be accepted, thus we have to enable scattered
1743 * rx if jumbo frames are enabled (or if the buffer size
1744 * is too small to accommodate non-jumbo packets)
1745 * to avoid splitting packets that don't fit into one buffer.
1748 if (dev->data->dev_conf.rxmode.jumbo_frame ||
1749 rctl_bsize < ETHER_MAX_LEN) {
1750 if (!dev->data->scattered_rx)
1751 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1753 (eth_rx_burst_t)eth_em_recv_scattered_pkts;
1754 dev->data->scattered_rx = 1;
1758 if (dev->data->dev_conf.rxmode.enable_scatter) {
1759 if (!dev->data->scattered_rx)
1760 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1761 dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
1762 dev->data->scattered_rx = 1;
1766 * Setup the Checksum Register.
1767 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1769 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1771 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1772 rxcsum |= E1000_RXCSUM_IPOFL;
1774 rxcsum &= ~E1000_RXCSUM_IPOFL;
1775 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1777 /* No MRQ or RSS support for now */
1779 /* Set early receive threshold on appropriate hw */
1780 if ((hw->mac.type == e1000_ich9lan ||
1781 hw->mac.type == e1000_pch2lan ||
1782 hw->mac.type == e1000_ich10lan) &&
1783 dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1784 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1785 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
1786 E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
1789 if (hw->mac.type == e1000_pch2lan) {
1790 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1791 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
1793 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
1796 /* Setup the Receive Control Register. */
1797 if (dev->data->dev_conf.rxmode.hw_strip_crc)
1798 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1800 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1802 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1803 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1804 E1000_RCTL_RDMTS_HALF |
1805 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1807 /* Make sure VLAN Filters are off. */
1808 rctl &= ~E1000_RCTL_VFE;
1809 /* Don't store bad packets. */
1810 rctl &= ~E1000_RCTL_SBP;
1811 /* Legacy descriptor type. */
1812 rctl &= ~E1000_RCTL_DTYP_MASK;
1815 * Configure support of jumbo frames, if any.
1817 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1818 rctl |= E1000_RCTL_LPE;
1820 rctl &= ~E1000_RCTL_LPE;
1822 /* Enable Receives. */
1823 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1828 /*********************************************************************
1830 * Enable transmit unit.
1832 **********************************************************************/
1834 eth_em_tx_init(struct rte_eth_dev *dev)
1836 struct e1000_hw *hw;
1837 struct em_tx_queue *txq;
1842 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1844 /* Setup the Base and Length of the Tx Descriptor Rings. */
1845 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1848 txq = dev->data->tx_queues[i];
1849 bus_addr = txq->tx_ring_phys_addr;
1850 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1852 sizeof(*txq->tx_ring));
1853 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1854 (uint32_t)(bus_addr >> 32));
1855 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1857 /* Setup the HW Tx Head and Tail descriptor pointers. */
1858 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1859 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1861 /* Setup Transmit threshold registers. */
1862 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1864 * bit 22 is reserved; on some models it should always be 0,
1865 * on others always 1.
1867 txdctl &= E1000_TXDCTL_COUNT_DESC;
1868 txdctl |= txq->pthresh & 0x3F;
1869 txdctl |= (txq->hthresh & 0x3F) << 8;
1870 txdctl |= (txq->wthresh & 0x3F) << 16;
1871 txdctl |= E1000_TXDCTL_GRAN;
1872 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1875 /* Program the Transmit Control Register. */
1876 tctl = E1000_READ_REG(hw, E1000_TCTL);
1877 tctl &= ~E1000_TCTL_CT;
1878 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1879 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1881 /* This write will effectively turn on the transmit unit. */
1882 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1886 em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1887 struct rte_eth_rxq_info *qinfo)
1889 struct em_rx_queue *rxq;
1891 rxq = dev->data->rx_queues[queue_id];
1893 qinfo->mp = rxq->mb_pool;
1894 qinfo->scattered_rx = dev->data->scattered_rx;
1895 qinfo->nb_desc = rxq->nb_rx_desc;
1896 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
1900 em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1901 struct rte_eth_txq_info *qinfo)
1903 struct em_tx_queue *txq;
1905 txq = dev->data->tx_queues[queue_id];
1907 qinfo->nb_desc = txq->nb_tx_desc;
1909 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
1910 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
1911 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
1912 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
1913 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;