1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_bus_pci.h>
22 #include <rte_memory.h>
23 #include <rte_memcpy.h>
24 #include <rte_memzone.h>
25 #include <rte_launch.h>
27 #include <rte_per_lcore.h>
28 #include <rte_lcore.h>
29 #include <rte_atomic.h>
30 #include <rte_branch_prediction.h>
31 #include <rte_mempool.h>
32 #include <rte_malloc.h>
34 #include <rte_ether.h>
35 #include <ethdev_driver.h>
36 #include <rte_prefetch.h>
42 #include <rte_string_fns.h>
44 #include "e1000_logs.h"
45 #include "base/e1000_api.h"
46 #include "e1000_ethdev.h"
47 #include "base/e1000_osdep.h"
49 #define E1000_TXD_VLAN_SHIFT 16
51 #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
53 #define E1000_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV6 | \
54 RTE_MBUF_F_TX_IPV4 | \
55 RTE_MBUF_F_TX_IP_CKSUM | \
56 RTE_MBUF_F_TX_L4_MASK | \
57 RTE_MBUF_F_TX_VLAN_PKT)
59 #define E1000_TX_OFFLOAD_NOTSUP_MASK \
60 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
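/*
 * A hedged usage sketch (illustrative, not driver code): a packet requesting
 * any offload outside E1000_TX_OFFLOAD_MASK can be screened out with
 *
 *	if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK)
 *		reject the mbuf;
 *
 * which is the per-mbuf check performed in eth_em_prep_pkts() below.
 */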
62 /* PCI offset for querying configuration status register */
63 #define PCI_CFG_STATUS_REG 0x06
64 #define FLUSH_DESC_REQUIRED 0x100
68 * Structure associated with each descriptor of the RX ring of an RX queue.
71 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
75 * Structure associated with each descriptor of the TX ring of a TX queue.
78 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
79 uint16_t next_id; /**< Index of next descriptor in ring. */
80 uint16_t last_id; /**< Index of last scattered descriptor. */
84 * Structure associated with each RX queue.
87 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
88 volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
89 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
90 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
91 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
92 struct em_rx_entry *sw_ring; /**< address of RX software ring. */
93 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
94 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
95 uint64_t offloads; /**< Offloads of DEV_RX_OFFLOAD_* */
96 uint16_t nb_rx_desc; /**< number of RX descriptors. */
97 uint16_t rx_tail; /**< current value of RDT register. */
98 uint16_t nb_rx_hold; /**< number of held free RX desc. */
99 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
100 uint16_t queue_id; /**< RX queue index. */
101 uint16_t port_id; /**< Device port identifier. */
102 uint8_t pthresh; /**< Prefetch threshold register. */
103 uint8_t hthresh; /**< Host threshold register. */
104 uint8_t wthresh; /**< Write-back threshold register. */
105 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
109 * Hardware context number
112 EM_CTX_0 = 0, /**< CTX0 */
113 EM_CTX_NUM = 1, /**< CTX NUM */
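/*
 * Note: EM_CTX_NUM also serves as the "no usable context" value returned
 * by what_ctx_update(); eth_em_xmit_pkts() builds a new context descriptor
 * whenever it sees that value.
 */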
116 /** Offload features */
117 union em_vlan_macip {
120 uint16_t l3_len:9; /**< L3 (IP) Header Length. */
121 uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
123 /**< VLAN Tag Control Information (CPU order). */
128 * Compare mask for vlan_macip_len.data,
129 * should be in sync with em_vlan_macip.f layout.
131 #define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
132 #define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) header length - 7 bits. */
133 #define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) header length - 9 bits. */
134 /** MAC+IP length. */
135 #define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
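/*
 * Worked example (illustrative, assuming the bit-field layout implied by the
 * masks above: l3_len in bits 8:0, l2_len in bits 15:9, vlan_tci in bits
 * 31:16 of em_vlan_macip.data): when a checksum offload latches
 * TX_MACIP_LEN_CMP_MASK into ctx_cache.cmp_mask, a cached context is reused
 * as long as the L2/L3 header lengths are unchanged; the VLAN TCI is not
 * compared because it is carried in the data descriptor, not in the context
 * descriptor.
 */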
138 * Structure to check if a new context needs to be built
141 uint64_t flags; /**< ol_flags related to context build. */
142 uint32_t cmp_mask; /**< compare mask */
143 union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
147 * Structure associated with each TX queue.
150 volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
151 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
152 struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
153 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
154 uint16_t nb_tx_desc; /**< number of TX descriptors. */
155 uint16_t tx_tail; /**< Current value of TDT register. */
156 /** Start freeing TX buffers if there are fewer free descriptors than
158 uint16_t tx_free_thresh;
159 /** Number of TX descriptors to use before the RS bit is set. */
160 uint16_t tx_rs_thresh;
161 /** Number of TX descriptors used since RS bit was set. */
163 /** Index to last TX descriptor to have been cleaned. */
164 uint16_t last_desc_cleaned;
165 /** Total number of TX descriptors ready to be allocated. */
167 uint16_t queue_id; /**< TX queue index. */
168 uint16_t port_id; /**< Device port identifier. */
169 uint8_t pthresh; /**< Prefetch threshold register. */
170 uint8_t hthresh; /**< Host threshold register. */
171 uint8_t wthresh; /**< Write-back threshold register. */
172 struct em_ctx_info ctx_cache;
173 /**< Hardware context history. */
174 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
178 #define RTE_PMD_USE_PREFETCH
181 #ifdef RTE_PMD_USE_PREFETCH
182 #define rte_em_prefetch(p) rte_prefetch0(p)
184 #define rte_em_prefetch(p) do {} while(0)
187 #ifdef RTE_PMD_PACKET_PREFETCH
188 #define rte_packet_prefetch(p) rte_prefetch1(p)
190 #define rte_packet_prefetch(p) do {} while(0)
193 #ifndef DEFAULT_TX_FREE_THRESH
194 #define DEFAULT_TX_FREE_THRESH 32
195 #endif /* DEFAULT_TX_FREE_THRESH */
197 #ifndef DEFAULT_TX_RS_THRESH
198 #define DEFAULT_TX_RS_THRESH 32
199 #endif /* DEFAULT_TX_RS_THRESH */
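/*
 * Worked example (illustrative): with a 512-entry TX ring and the defaults
 * above, tx_free_thresh = tx_rs_thresh = 32, so eth_em_xmit_pkts() requests
 * descriptor write-back (RS bit) roughly every 32 descriptors and calls
 * em_xmit_cleanup() once fewer than 32 descriptors remain free; each cleanup
 * pass then reclaims descriptors in batches of tx_rs_thresh.
 */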
202 /*********************************************************************
206 **********************************************************************/
209 * Populates TX context descriptor.
212 em_set_xmit_ctx(struct em_tx_queue* txq,
213 volatile struct e1000_context_desc *ctx_txd,
215 union em_vlan_macip hdrlen)
217 uint32_t cmp_mask, cmd_len;
218 uint16_t ipcse, l2len;
219 struct e1000_context_desc ctx;
222 cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
224 l2len = hdrlen.f.l2_len;
225 ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
227 /* setup IPCS* fields */
228 ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
229 ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
230 offsetof(struct rte_ipv4_hdr, hdr_checksum));
233 * When doing checksum or TCP segmentation with IPv6 headers,
234 * the IPCSE field should be set to 0.
236 if (flags & RTE_MBUF_F_TX_IP_CKSUM) {
237 ctx.lower_setup.ip_fields.ipcse =
238 (uint16_t)rte_cpu_to_le_16(ipcse - 1);
239 cmd_len |= E1000_TXD_CMD_IP;
240 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
242 ctx.lower_setup.ip_fields.ipcse = 0;
245 /* setup TUCS* fields */
246 ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
247 ctx.upper_setup.tcp_fields.tucse = 0;
249 switch (flags & RTE_MBUF_F_TX_L4_MASK) {
250 case RTE_MBUF_F_TX_UDP_CKSUM:
251 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
252 offsetof(struct rte_udp_hdr, dgram_cksum));
253 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
255 case RTE_MBUF_F_TX_TCP_CKSUM:
256 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
257 offsetof(struct rte_tcp_hdr, cksum));
258 cmd_len |= E1000_TXD_CMD_TCP;
259 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
262 ctx.upper_setup.tcp_fields.tucso = 0;
265 ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
266 ctx.tcp_seg_setup.data = 0;
270 txq->ctx_cache.flags = flags;
271 txq->ctx_cache.cmp_mask = cmp_mask;
272 txq->ctx_cache.hdrlen = hdrlen;
276 * Check which hardware context can be used. Use the existing match
277 * or create a new context descriptor.
279 static inline uint32_t
280 what_ctx_update(struct em_tx_queue *txq, uint64_t flags,
281 union em_vlan_macip hdrlen)
283 /* If it matches the current context */
284 if (likely (txq->ctx_cache.flags == flags &&
285 ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
286 txq->ctx_cache.cmp_mask) == 0))
293 /* Reset transmit descriptors after they have been used */
295 em_xmit_cleanup(struct em_tx_queue *txq)
297 struct em_tx_entry *sw_ring = txq->sw_ring;
298 volatile struct e1000_data_desc *txr = txq->tx_ring;
299 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
300 uint16_t nb_tx_desc = txq->nb_tx_desc;
301 uint16_t desc_to_clean_to;
302 uint16_t nb_tx_to_clean;
304 /* Determine the last descriptor needing to be cleaned */
305 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
306 if (desc_to_clean_to >= nb_tx_desc)
307 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
309 /* Check to make sure the last descriptor to clean is done */
310 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
311 if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
314 "TX descriptor %4u is not done"
315 "(port=%d queue=%d)", desc_to_clean_to,
316 txq->port_id, txq->queue_id);
317 /* Failed to clean any descriptors, better luck next time */
321 /* Figure out how many descriptors will be cleaned */
322 if (last_desc_cleaned > desc_to_clean_to)
323 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
326 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
330 "Cleaning %4u TX descriptors: %4u to %4u "
331 "(port=%d queue=%d)", nb_tx_to_clean,
332 last_desc_cleaned, desc_to_clean_to, txq->port_id,
336 * The last descriptor to clean is done, so that means all the
337 * descriptors from the last descriptor that was cleaned
338 * up to the last descriptor with the RS bit set
339 * are done. Only reset the threshold descriptor.
341 txr[desc_to_clean_to].upper.fields.status = 0;
343 /* Update the txq to reflect the last descriptor that was cleaned */
344 txq->last_desc_cleaned = desc_to_clean_to;
345 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
351 static inline uint32_t
352 tx_desc_cksum_flags_to_upper(uint64_t ol_flags)
354 static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
355 static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
358 tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
359 tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
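	/*
	 * e.g. ol_flags = RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_TCP_CKSUM
	 * yields (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8, i.e.
	 * both checksum-insert bits in the POPTS byte of the descriptor.
	 */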
364 eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
367 struct em_tx_queue *txq;
368 struct em_tx_entry *sw_ring;
369 struct em_tx_entry *txe, *txn;
370 volatile struct e1000_data_desc *txr;
371 volatile struct e1000_data_desc *txd;
372 struct rte_mbuf *tx_pkt;
373 struct rte_mbuf *m_seg;
374 uint64_t buf_dma_addr;
376 uint32_t cmd_type_len;
386 union em_vlan_macip hdrlen;
389 sw_ring = txq->sw_ring;
391 tx_id = txq->tx_tail;
392 txe = &sw_ring[tx_id];
394 /* Determine if the descriptor ring needs to be cleaned. */
395 if (txq->nb_tx_free < txq->tx_free_thresh)
396 em_xmit_cleanup(txq);
399 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
403 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
406 * Determine how many (if any) context descriptors
407 * are needed for offload functionality.
409 ol_flags = tx_pkt->ol_flags;
411 /* If hardware offload required */
412 tx_ol_req = (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK));
414 hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
415 hdrlen.f.l2_len = tx_pkt->l2_len;
416 hdrlen.f.l3_len = tx_pkt->l3_len;
417 /* If a new context needs to be built or the existing ctx can be reused. */
418 ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
420 /* Only allocate a context descriptor if required. */
421 new_ctx = (ctx == EM_CTX_NUM);
425 * Keep track of how many descriptors are used this loop.
426 * This will always be the number of segments + the number of
427 * context descriptors (0 or 1) required to transmit the packet.
429 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
432 * The number of descriptors that must be allocated for a
433 * packet is the number of segments of that packet, plus 1
434 * Context Descriptor for the hardware offload, if any.
435 * Determine the last TX descriptor to allocate in the TX ring
436 * for the packet, starting from the current position (tx_id)
439 tx_last = (uint16_t) (tx_id + nb_used - 1);
442 if (tx_last >= txq->nb_tx_desc)
443 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
445 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
446 " tx_first=%u tx_last=%u",
447 (unsigned) txq->port_id,
448 (unsigned) txq->queue_id,
449 (unsigned) tx_pkt->pkt_len,
454 * Make sure there are enough TX descriptors available to
455 * transmit the entire packet.
456 * nb_used better be less than or equal to txq->tx_rs_thresh
458 while (unlikely (nb_used > txq->nb_tx_free)) {
459 PMD_TX_LOG(DEBUG, "Not enough free TX descriptors "
460 "nb_used=%4u nb_free=%4u "
461 "(port=%d queue=%d)",
462 nb_used, txq->nb_tx_free,
463 txq->port_id, txq->queue_id);
465 if (em_xmit_cleanup(txq) != 0) {
466 /* Could not clean any descriptors */
474 * By now there are enough free TX descriptors to transmit
479 * Set common flags of all TX Data Descriptors.
481 * The following bits must be set in all Data Descriptors:
482 * - E1000_TXD_DTYP_DATA
483 * - E1000_TXD_DTYP_DEXT
485 * The following bits must be set in the first Data Descriptor
486 * and are ignored in the other ones:
487 * - E1000_TXD_POPTS_IXSM
488 * - E1000_TXD_POPTS_TXSM
490 * The following bits must be set in the last Data Descriptor
491 * and are ignored in the other ones:
492 * - E1000_TXD_CMD_VLE
493 * - E1000_TXD_CMD_IFCS
495 * The following bits must only be set in the last Data
497 * - E1000_TXD_CMD_EOP
499 * The following bits can be set in any Data Descriptor, but
500 * are only set in the last Data Descriptor:
503 cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
507 /* Set VLAN Tag offload fields. */
508 if (ol_flags & RTE_MBUF_F_TX_VLAN_PKT) {
509 cmd_type_len |= E1000_TXD_CMD_VLE;
510 popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
515 * Setup the TX Context Descriptor if required
518 volatile struct e1000_context_desc *ctx_txd;
520 ctx_txd = (volatile struct e1000_context_desc *)
523 txn = &sw_ring[txe->next_id];
524 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
526 if (txe->mbuf != NULL) {
527 rte_pktmbuf_free_seg(txe->mbuf);
531 em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
534 txe->last_id = tx_last;
535 tx_id = txe->next_id;
540 * Set up the TX Data Descriptor.
541 * This path is taken whether a new context descriptor
542 * was built above or an existing one is reused.
544 popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
550 txn = &sw_ring[txe->next_id];
552 if (txe->mbuf != NULL)
553 rte_pktmbuf_free_seg(txe->mbuf);
557 * Set up Transmit Data Descriptor.
559 slen = m_seg->data_len;
560 buf_dma_addr = rte_mbuf_data_iova(m_seg);
562 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
563 txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
564 txd->upper.data = rte_cpu_to_le_32(popts_spec);
566 txe->last_id = tx_last;
567 tx_id = txe->next_id;
570 } while (m_seg != NULL);
573 * The last packet data descriptor needs End Of Packet (EOP)
575 cmd_type_len |= E1000_TXD_CMD_EOP;
576 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
577 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
579 /* Set RS bit only on threshold packets' last descriptor */
580 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
582 "Setting RS bit on TXD id=%4u "
583 "(port=%d queue=%d)",
584 tx_last, txq->port_id, txq->queue_id);
586 cmd_type_len |= E1000_TXD_CMD_RS;
588 /* Update txq RS bit counters */
591 txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
597 * Set the Transmit Descriptor Tail (TDT)
599 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
600 (unsigned) txq->port_id, (unsigned) txq->queue_id,
601 (unsigned) tx_id, (unsigned) nb_tx);
602 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
603 txq->tx_tail = tx_id;
608 /*********************************************************************
612 **********************************************************************/
614 eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
620 for (i = 0; i < nb_pkts; i++) {
623 if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
628 #ifdef RTE_ETHDEV_DEBUG_TX
629 ret = rte_validate_tx_offload(m);
635 ret = rte_net_intel_cksum_prepare(m);
645 /*********************************************************************
649 **********************************************************************/
651 static inline uint64_t
652 rx_desc_status_to_pkt_flags(uint32_t rx_status)
656 /* Check if VLAN present */
657 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
658 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
663 static inline uint64_t
664 rx_desc_error_to_pkt_flags(uint32_t rx_error)
666 uint64_t pkt_flags = 0;
668 if (rx_error & E1000_RXD_ERR_IPE)
669 pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
670 if (rx_error & E1000_RXD_ERR_TCPE)
671 pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
676 eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
679 volatile struct e1000_rx_desc *rx_ring;
680 volatile struct e1000_rx_desc *rxdp;
681 struct em_rx_queue *rxq;
682 struct em_rx_entry *sw_ring;
683 struct em_rx_entry *rxe;
684 struct rte_mbuf *rxm;
685 struct rte_mbuf *nmb;
686 struct e1000_rx_desc rxd;
698 rx_id = rxq->rx_tail;
699 rx_ring = rxq->rx_ring;
700 sw_ring = rxq->sw_ring;
701 while (nb_rx < nb_pkts) {
703 * The order of operations here is important as the DD status
704 * bit must not be read after any other descriptor fields.
705 * rx_ring and rxdp are pointing to volatile data so the order
706 * of accesses cannot be reordered by the compiler. If they were
707 * not volatile, they could be reordered which could lead to
708 * using invalid descriptor fields when read from rxd.
710 rxdp = &rx_ring[rx_id];
711 status = rxdp->status;
712 if (! (status & E1000_RXD_STAT_DD))
719 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
720 * likely to be invalid and to be dropped by the various
721 * validation checks performed by the network stack.
723 * Allocate a new mbuf to replenish the RX ring descriptor.
724 * If the allocation fails:
725 * - arrange for that RX descriptor to be the first one
726 * being parsed the next time the receive function is
727 * invoked [on the same queue].
729 * - Stop parsing the RX ring and return immediately.
731 * This policy does not drop the packet received in the RX
732 * descriptor for which the allocation of a new mbuf failed.
733 * Thus, it allows that packet to be later retrieved if
734 * mbufs have been freed in the meantime.
735 * As a side effect, holding RX descriptors instead of
736 * systematically giving them back to the NIC may lead to
737 * RX ring exhaustion situations.
738 * However, the NIC can gracefully prevent such situations
739 * from happening by sending specific "back-pressure" flow control
740 * frames to its peer(s).
742 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
743 "status=0x%x pkt_len=%u",
744 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
745 (unsigned) rx_id, (unsigned) status,
746 (unsigned) rte_le_to_cpu_16(rxd.length));
748 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
750 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
752 (unsigned) rxq->port_id,
753 (unsigned) rxq->queue_id);
754 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
759 rxe = &sw_ring[rx_id];
761 if (rx_id == rxq->nb_rx_desc)
764 /* Prefetch next mbuf while processing current one. */
765 rte_em_prefetch(sw_ring[rx_id].mbuf);
768 * When next RX descriptor is on a cache-line boundary,
769 * prefetch the next 4 RX descriptors and the next 8 pointers
772 if ((rx_id & 0x3) == 0) {
773 rte_em_prefetch(&rx_ring[rx_id]);
774 rte_em_prefetch(&sw_ring[rx_id]);
777 /* Rearm RXD: attach new mbuf and reset status to zero. */
782 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
783 rxdp->buffer_addr = dma_addr;
787 * Initialize the returned mbuf.
788 * 1) setup generic mbuf fields:
789 * - number of segments,
792 * - RX port identifier.
793 * 2) integrate hardware offload data, if any:
795 * - IP checksum flag,
796 * - VLAN TCI, if any,
799 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
801 rxm->data_off = RTE_PKTMBUF_HEADROOM;
802 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
805 rxm->pkt_len = pkt_len;
806 rxm->data_len = pkt_len;
807 rxm->port = rxq->port_id;
809 rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
810 rxm->ol_flags = rxm->ol_flags |
811 rx_desc_error_to_pkt_flags(rxd.errors);
813 /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
814 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
817 * Store the mbuf address into the next entry of the array
818 * of returned packets.
820 rx_pkts[nb_rx++] = rxm;
822 rxq->rx_tail = rx_id;
825 * If the number of free RX descriptors is greater than the RX free
826 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
828 * Update the RDT with the value of the last processed RX descriptor
829 * minus 1, to guarantee that the RDT register is never equal to the
830 * RDH register, which creates a "full" ring situation from the
831 * hardware point of view...
833 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
834 if (nb_hold > rxq->rx_free_thresh) {
835 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
836 "nb_hold=%u nb_rx=%u",
837 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
838 (unsigned) rx_id, (unsigned) nb_hold,
840 rx_id = (uint16_t) ((rx_id == 0) ?
841 (rxq->nb_rx_desc - 1) : (rx_id - 1));
842 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
845 rxq->nb_rx_hold = nb_hold;
850 eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
853 struct em_rx_queue *rxq;
854 volatile struct e1000_rx_desc *rx_ring;
855 volatile struct e1000_rx_desc *rxdp;
856 struct em_rx_entry *sw_ring;
857 struct em_rx_entry *rxe;
858 struct rte_mbuf *first_seg;
859 struct rte_mbuf *last_seg;
860 struct rte_mbuf *rxm;
861 struct rte_mbuf *nmb;
862 struct e1000_rx_desc rxd;
863 uint64_t dma; /* Physical address of mbuf data buffer */
874 rx_id = rxq->rx_tail;
875 rx_ring = rxq->rx_ring;
876 sw_ring = rxq->sw_ring;
879 * Retrieve RX context of current packet, if any.
881 first_seg = rxq->pkt_first_seg;
882 last_seg = rxq->pkt_last_seg;
884 while (nb_rx < nb_pkts) {
887 * The order of operations here is important as the DD status
888 * bit must not be read after any other descriptor fields.
889 * rx_ring and rxdp are pointing to volatile data so the order
890 * of accesses cannot be reordered by the compiler. If they were
891 * not volatile, they could be reordered which could lead to
892 * using invalid descriptor fields when read from rxd.
894 rxdp = &rx_ring[rx_id];
895 status = rxdp->status;
896 if (! (status & E1000_RXD_STAT_DD))
903 * Allocate a new mbuf to replenish the RX ring descriptor.
904 * If the allocation fails:
905 * - arrange for that RX descriptor to be the first one
906 * being parsed the next time the receive function is
907 * invoked [on the same queue].
909 * - Stop parsing the RX ring and return immediately.
911 * This policy does not drop the packet received in the RX
912 * descriptor for which the allocation of a new mbuf failed.
913 * Thus, it allows that packet to be later retrieved if
914 * mbufs have been freed in the meantime.
915 * As a side effect, holding RX descriptors instead of
916 * systematically giving them back to the NIC may lead to
917 * RX ring exhaustion situations.
918 * However, the NIC can gracefully prevent such situations
919 * from happening by sending specific "back-pressure" flow control
920 * frames to its peer(s).
922 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
923 "status=0x%x data_len=%u",
924 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
925 (unsigned) rx_id, (unsigned) status,
926 (unsigned) rte_le_to_cpu_16(rxd.length));
928 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
930 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
931 "queue_id=%u", (unsigned) rxq->port_id,
932 (unsigned) rxq->queue_id);
933 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
938 rxe = &sw_ring[rx_id];
940 if (rx_id == rxq->nb_rx_desc)
943 /* Prefetch next mbuf while processing current one. */
944 rte_em_prefetch(sw_ring[rx_id].mbuf);
947 * When next RX descriptor is on a cache-line boundary,
948 * prefetch the next 4 RX descriptors and the next 8 pointers
951 if ((rx_id & 0x3) == 0) {
952 rte_em_prefetch(&rx_ring[rx_id]);
953 rte_em_prefetch(&sw_ring[rx_id]);
957 * Update RX descriptor with the physical address of the new
958 * data buffer of the newly allocated mbuf.
962 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
963 rxdp->buffer_addr = dma;
967 * Set data length & data buffer address of mbuf.
969 data_len = rte_le_to_cpu_16(rxd.length);
970 rxm->data_len = data_len;
971 rxm->data_off = RTE_PKTMBUF_HEADROOM;
974 * If this is the first buffer of the received packet,
975 * set the pointer to the first mbuf of the packet and
976 * initialize its context.
977 * Otherwise, update the total length and the number of segments
978 * of the current scattered packet, and update the pointer to
979 * the last mbuf of the current packet.
981 if (first_seg == NULL) {
983 first_seg->pkt_len = data_len;
984 first_seg->nb_segs = 1;
986 first_seg->pkt_len += data_len;
987 first_seg->nb_segs++;
988 last_seg->next = rxm;
992 * If this is not the last buffer of the received packet,
993 * update the pointer to the last mbuf of the current scattered
994 * packet and continue to parse the RX ring.
996 if (! (status & E1000_RXD_STAT_EOP)) {
1002 * This is the last buffer of the received packet.
1003 * If the CRC is not stripped by the hardware:
1004 * - Subtract the CRC length from the total packet length.
1005 * - If the last buffer only contains the whole CRC or a part
1006 * of it, free the mbuf associated with the last buffer.
1007 * If part of the CRC is also contained in the previous
1008 * mbuf, subtract the length of that CRC part from the
1009 * data length of the previous mbuf.
1012 if (unlikely(rxq->crc_len > 0)) {
1013 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1014 if (data_len <= RTE_ETHER_CRC_LEN) {
1015 rte_pktmbuf_free_seg(rxm);
1016 first_seg->nb_segs--;
1017 last_seg->data_len = (uint16_t)
1018 (last_seg->data_len -
1019 (RTE_ETHER_CRC_LEN - data_len));
1020 last_seg->next = NULL;
1022 rxm->data_len = (uint16_t)
1023 (data_len - RTE_ETHER_CRC_LEN);
1027 * Initialize the first mbuf of the returned packet:
1028 * - RX port identifier,
1029 * - hardware offload data, if any:
1030 * - IP checksum flag,
1033 first_seg->port = rxq->port_id;
1035 first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
1036 first_seg->ol_flags = first_seg->ol_flags |
1037 rx_desc_error_to_pkt_flags(rxd.errors);
1039 /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
1040 rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
1042 /* Prefetch data of first segment, if configured to do so. */
1043 rte_packet_prefetch((char *)first_seg->buf_addr +
1044 first_seg->data_off);
1047 * Store the mbuf address into the next entry of the array
1048 * of returned packets.
1050 rx_pkts[nb_rx++] = first_seg;
1053 * Setup receipt context for a new packet.
1059 * Record index of the next RX descriptor to probe.
1061 rxq->rx_tail = rx_id;
1064 * Save receive context.
1066 rxq->pkt_first_seg = first_seg;
1067 rxq->pkt_last_seg = last_seg;
1070 * If the number of free RX descriptors is greater than the RX free
1071 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1073 * Update the RDT with the value of the last processed RX descriptor
1074 * minus 1, to guarantee that the RDT register is never equal to the
1075 * RDH register, which creates a "full" ring situation from the
1076 * hardware point of view...
1078 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1079 if (nb_hold > rxq->rx_free_thresh) {
1080 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1081 "nb_hold=%u nb_rx=%u",
1082 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1083 (unsigned) rx_id, (unsigned) nb_hold,
1085 rx_id = (uint16_t) ((rx_id == 0) ?
1086 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1087 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1090 rxq->nb_rx_hold = nb_hold;
1094 #define EM_MAX_BUF_SIZE 16384
1095 #define EM_RCTL_FLXBUF_STEP 1024
1098 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
1102 if (txq->sw_ring != NULL) {
1103 for (i = 0; i != txq->nb_tx_desc; i++) {
1104 if (txq->sw_ring[i].mbuf != NULL) {
1105 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1106 txq->sw_ring[i].mbuf = NULL;
1113 em_tx_queue_release(struct em_tx_queue *txq)
1116 em_tx_queue_release_mbufs(txq);
1117 rte_free(txq->sw_ring);
1123 eth_em_tx_queue_release(void *txq)
1125 em_tx_queue_release(txq);
1128 /* (Re)set dynamic em_tx_queue fields to defaults */
1130 em_reset_tx_queue(struct em_tx_queue *txq)
1132 uint16_t i, nb_desc, prev;
1133 static const struct e1000_data_desc txd_init = {
1134 .upper.fields = {.status = E1000_TXD_STAT_DD},
1137 nb_desc = txq->nb_tx_desc;
1139 /* Initialize ring entries */
1141 prev = (uint16_t) (nb_desc - 1);
1143 for (i = 0; i < nb_desc; i++) {
1144 txq->tx_ring[i] = txd_init;
1145 txq->sw_ring[i].mbuf = NULL;
1146 txq->sw_ring[i].last_id = i;
1147 txq->sw_ring[prev].next_id = i;
1152 * Always allow 1 descriptor to be un-allocated to avoid
1153 * a H/W race condition
1155 txq->nb_tx_free = (uint16_t)(nb_desc - 1);
1156 txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
1157 txq->nb_tx_used = 0;
1160 memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
1164 em_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1166 uint64_t tx_offload_capa;
1170 DEV_TX_OFFLOAD_MULTI_SEGS |
1171 DEV_TX_OFFLOAD_VLAN_INSERT |
1172 DEV_TX_OFFLOAD_IPV4_CKSUM |
1173 DEV_TX_OFFLOAD_UDP_CKSUM |
1174 DEV_TX_OFFLOAD_TCP_CKSUM;
1176 return tx_offload_capa;
1180 em_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1182 uint64_t tx_queue_offload_capa;
1185 * As only one Tx queue can be used, let the per-queue offloading
1186 * capability be the same as the per-port offloading capability
1187 * for convenience.
1189 tx_queue_offload_capa = em_get_tx_port_offloads_capa(dev);
1191 return tx_queue_offload_capa;
1195 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
1198 unsigned int socket_id,
1199 const struct rte_eth_txconf *tx_conf)
1201 const struct rte_memzone *tz;
1202 struct em_tx_queue *txq;
1203 struct e1000_hw *hw;
1205 uint16_t tx_rs_thresh, tx_free_thresh;
1208 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1210 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1213 * Validate number of transmit descriptors.
1214 * It must not exceed hardware maximum, and must be multiple
1217 if (nb_desc % EM_TXD_ALIGN != 0 ||
1218 (nb_desc > E1000_MAX_RING_DESC) ||
1219 (nb_desc < E1000_MIN_RING_DESC)) {
1223 tx_free_thresh = tx_conf->tx_free_thresh;
1224 if (tx_free_thresh == 0)
1225 tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
1226 DEFAULT_TX_FREE_THRESH);
1228 tx_rs_thresh = tx_conf->tx_rs_thresh;
1229 if (tx_rs_thresh == 0)
1230 tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
1231 DEFAULT_TX_RS_THRESH);
1233 if (tx_free_thresh >= (nb_desc - 3)) {
1234 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the "
1235 "number of TX descriptors minus 3. "
1236 "(tx_free_thresh=%u port=%d queue=%d)",
1237 (unsigned int)tx_free_thresh,
1238 (int)dev->data->port_id, (int)queue_idx);
1241 if (tx_rs_thresh > tx_free_thresh) {
1242 PMD_INIT_LOG(ERR, "tx_rs_thresh must be less than or equal to "
1243 "tx_free_thresh. (tx_free_thresh=%u "
1244 "tx_rs_thresh=%u port=%d queue=%d)",
1245 (unsigned int)tx_free_thresh,
1246 (unsigned int)tx_rs_thresh,
1247 (int)dev->data->port_id,
1253 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
1254 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
1255 * by the NIC and all descriptors are written back after the NIC
1256 * accumulates WTHRESH descriptors.
1258 if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
1259 PMD_INIT_LOG(ERR, "TX WTHRESH must be set to 0 if "
1260 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
1261 "port=%d queue=%d)", (unsigned int)tx_rs_thresh,
1262 (int)dev->data->port_id, (int)queue_idx);
1266 /* Free memory prior to re-allocation if needed... */
1267 if (dev->data->tx_queues[queue_idx] != NULL) {
1268 em_tx_queue_release(dev->data->tx_queues[queue_idx]);
1269 dev->data->tx_queues[queue_idx] = NULL;
1273 * Allocate TX ring hardware descriptors. A memzone large enough to
1274 * handle the maximum ring size is allocated in order to allow for
1275 * resizing in later calls to the queue setup function.
1277 tsize = sizeof(txq->tx_ring[0]) * E1000_MAX_RING_DESC;
1278 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
1279 RTE_CACHE_LINE_SIZE, socket_id);
1283 /* Allocate the tx queue data structure. */
1284 if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
1285 RTE_CACHE_LINE_SIZE)) == NULL)
1288 /* Allocate software ring */
1289 if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
1290 sizeof(txq->sw_ring[0]) * nb_desc,
1291 RTE_CACHE_LINE_SIZE)) == NULL) {
1292 em_tx_queue_release(txq);
1296 txq->nb_tx_desc = nb_desc;
1297 txq->tx_free_thresh = tx_free_thresh;
1298 txq->tx_rs_thresh = tx_rs_thresh;
1299 txq->pthresh = tx_conf->tx_thresh.pthresh;
1300 txq->hthresh = tx_conf->tx_thresh.hthresh;
1301 txq->wthresh = tx_conf->tx_thresh.wthresh;
1302 txq->queue_id = queue_idx;
1303 txq->port_id = dev->data->port_id;
1305 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1306 txq->tx_ring_phys_addr = tz->iova;
1307 txq->tx_ring = (struct e1000_data_desc *) tz->addr;
1309 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1310 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1312 em_reset_tx_queue(txq);
1314 dev->data->tx_queues[queue_idx] = txq;
1315 txq->offloads = offloads;
1320 em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
1324 if (rxq->sw_ring != NULL) {
1325 for (i = 0; i != rxq->nb_rx_desc; i++) {
1326 if (rxq->sw_ring[i].mbuf != NULL) {
1327 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1328 rxq->sw_ring[i].mbuf = NULL;
1335 em_rx_queue_release(struct em_rx_queue *rxq)
1338 em_rx_queue_release_mbufs(rxq);
1339 rte_free(rxq->sw_ring);
1345 eth_em_rx_queue_release(void *rxq)
1347 em_rx_queue_release(rxq);
1350 /* Reset dynamic em_rx_queue fields back to defaults */
1352 em_reset_rx_queue(struct em_rx_queue *rxq)
1355 rxq->nb_rx_hold = 0;
1356 rxq->pkt_first_seg = NULL;
1357 rxq->pkt_last_seg = NULL;
1361 em_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1363 uint64_t rx_offload_capa;
1364 uint32_t max_rx_pktlen;
1366 max_rx_pktlen = em_get_max_pktlen(dev);
1369 DEV_RX_OFFLOAD_VLAN_STRIP |
1370 DEV_RX_OFFLOAD_VLAN_FILTER |
1371 DEV_RX_OFFLOAD_IPV4_CKSUM |
1372 DEV_RX_OFFLOAD_UDP_CKSUM |
1373 DEV_RX_OFFLOAD_TCP_CKSUM |
1374 DEV_RX_OFFLOAD_KEEP_CRC |
1375 DEV_RX_OFFLOAD_SCATTER;
1376 if (max_rx_pktlen > RTE_ETHER_MAX_LEN)
1377 rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1379 return rx_offload_capa;
1383 em_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1385 uint64_t rx_queue_offload_capa;
1388 * As only one Rx queue can be used, let the per-queue offloading
1389 * capability be the same as the per-port offloading capability
1390 * for convenience.
1392 rx_queue_offload_capa = em_get_rx_port_offloads_capa(dev);
1394 return rx_queue_offload_capa;
1398 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
1401 unsigned int socket_id,
1402 const struct rte_eth_rxconf *rx_conf,
1403 struct rte_mempool *mp)
1405 const struct rte_memzone *rz;
1406 struct em_rx_queue *rxq;
1407 struct e1000_hw *hw;
1411 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1413 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1416 * Validate number of receive descriptors.
1417 * It must not exceed hardware maximum, and must be multiple
1420 if (nb_desc % EM_RXD_ALIGN != 0 ||
1421 (nb_desc > E1000_MAX_RING_DESC) ||
1422 (nb_desc < E1000_MIN_RING_DESC)) {
1427 * EM devices don't support drop_en functionality.
1428 * It's an optimization that does nothing on single-queue devices,
1429 * so just log the issue and carry on.
1431 if (rx_conf->rx_drop_en) {
1432 PMD_INIT_LOG(NOTICE, "drop_en functionality not supported by "
1436 /* Free memory prior to re-allocation if needed. */
1437 if (dev->data->rx_queues[queue_idx] != NULL) {
1438 em_rx_queue_release(dev->data->rx_queues[queue_idx]);
1439 dev->data->rx_queues[queue_idx] = NULL;
1442 /* Allocate RX ring for max possible number of hardware descriptors. */
1443 rsize = sizeof(rxq->rx_ring[0]) * E1000_MAX_RING_DESC;
1444 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
1445 RTE_CACHE_LINE_SIZE, socket_id);
1449 /* Allocate the RX queue data structure. */
1450 if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
1451 RTE_CACHE_LINE_SIZE)) == NULL)
1454 /* Allocate software ring. */
1455 if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1456 sizeof (rxq->sw_ring[0]) * nb_desc,
1457 RTE_CACHE_LINE_SIZE)) == NULL) {
1458 em_rx_queue_release(rxq);
1463 rxq->nb_rx_desc = nb_desc;
1464 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1465 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1466 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1467 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1468 rxq->queue_id = queue_idx;
1469 rxq->port_id = dev->data->port_id;
1470 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1471 rxq->crc_len = RTE_ETHER_CRC_LEN;
1475 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1476 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
1477 rxq->rx_ring_phys_addr = rz->iova;
1478 rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
1480 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1481 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1483 dev->data->rx_queues[queue_idx] = rxq;
1484 em_reset_rx_queue(rxq);
1485 rxq->offloads = offloads;
1491 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1493 #define EM_RXQ_SCAN_INTERVAL 4
1494 volatile struct e1000_rx_desc *rxdp;
1495 struct em_rx_queue *rxq;
1498 rxq = dev->data->rx_queues[rx_queue_id];
1499 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1501 while ((desc < rxq->nb_rx_desc) &&
1502 (rxdp->status & E1000_RXD_STAT_DD)) {
1503 desc += EM_RXQ_SCAN_INTERVAL;
1504 rxdp += EM_RXQ_SCAN_INTERVAL;
1505 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1506 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1507 desc - rxq->nb_rx_desc]);
1514 eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
1516 volatile struct e1000_rx_desc *rxdp;
1517 struct em_rx_queue *rxq = rx_queue;
1520 if (unlikely(offset >= rxq->nb_rx_desc))
1522 desc = rxq->rx_tail + offset;
1523 if (desc >= rxq->nb_rx_desc)
1524 desc -= rxq->nb_rx_desc;
1526 rxdp = &rxq->rx_ring[desc];
1527 return !!(rxdp->status & E1000_RXD_STAT_DD);
1531 eth_em_rx_descriptor_status(void *rx_queue, uint16_t offset)
1533 struct em_rx_queue *rxq = rx_queue;
1534 volatile uint8_t *status;
1537 if (unlikely(offset >= rxq->nb_rx_desc))
1540 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1541 return RTE_ETH_RX_DESC_UNAVAIL;
1543 desc = rxq->rx_tail + offset;
1544 if (desc >= rxq->nb_rx_desc)
1545 desc -= rxq->nb_rx_desc;
1547 status = &rxq->rx_ring[desc].status;
1548 if (*status & E1000_RXD_STAT_DD)
1549 return RTE_ETH_RX_DESC_DONE;
1551 return RTE_ETH_RX_DESC_AVAIL;
1555 eth_em_tx_descriptor_status(void *tx_queue, uint16_t offset)
1557 struct em_tx_queue *txq = tx_queue;
1558 volatile uint8_t *status;
1561 if (unlikely(offset >= txq->nb_tx_desc))
1564 desc = txq->tx_tail + offset;
1565 /* go to next desc that has the RS bit */
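	/*
	 * DD is only written back on descriptors where the RS bit was set
	 * (roughly every tx_rs_thresh descriptors), so round the offset up
	 * to the next such boundary before checking the status.
	 */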
1566 desc = ((desc + txq->tx_rs_thresh - 1) / txq->tx_rs_thresh) *
1568 if (desc >= txq->nb_tx_desc) {
1569 desc -= txq->nb_tx_desc;
1570 if (desc >= txq->nb_tx_desc)
1571 desc -= txq->nb_tx_desc;
1574 status = &txq->tx_ring[desc].upper.fields.status;
1575 if (*status & E1000_TXD_STAT_DD)
1576 return RTE_ETH_TX_DESC_DONE;
1578 return RTE_ETH_TX_DESC_FULL;
1582 em_dev_clear_queues(struct rte_eth_dev *dev)
1585 struct em_tx_queue *txq;
1586 struct em_rx_queue *rxq;
1588 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1589 txq = dev->data->tx_queues[i];
1591 em_tx_queue_release_mbufs(txq);
1592 em_reset_tx_queue(txq);
1596 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1597 rxq = dev->data->rx_queues[i];
1599 em_rx_queue_release_mbufs(rxq);
1600 em_reset_rx_queue(rxq);
1606 em_dev_free_queues(struct rte_eth_dev *dev)
1610 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1611 eth_em_rx_queue_release(dev->data->rx_queues[i]);
1612 dev->data->rx_queues[i] = NULL;
1613 rte_eth_dma_zone_free(dev, "rx_ring", i);
1615 dev->data->nb_rx_queues = 0;
1617 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1618 eth_em_tx_queue_release(dev->data->tx_queues[i]);
1619 dev->data->tx_queues[i] = NULL;
1620 rte_eth_dma_zone_free(dev, "tx_ring", i);
1622 dev->data->nb_tx_queues = 0;
1626 * Takes the RX buffer size as an input/output parameter.
1627 * Returns the (BSIZE | BSEX | FLXBUF) fields of the RCTL register.
1630 em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
1633 * For BSIZE & BSEX all configurable sizes are:
1634 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1635 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1636 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1637 * 2048: rctl |= E1000_RCTL_SZ_2048;
1638 * 1024: rctl |= E1000_RCTL_SZ_1024;
1639 * 512: rctl |= E1000_RCTL_SZ_512;
1640 * 256: rctl |= E1000_RCTL_SZ_256;
1642 static const struct {
1645 } bufsz_to_rctl[] = {
1646 {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
1647 {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
1648 {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
1649 {2048, E1000_RCTL_SZ_2048},
1650 {1024, E1000_RCTL_SZ_1024},
1651 {512, E1000_RCTL_SZ_512},
1652 {256, E1000_RCTL_SZ_256},
1656 uint32_t rctl_bsize;
1658 rctl_bsize = *bufsz;
1661 * Starting from 82571 it is possible to specify RX buffer size
1662 * by RCTL.FLXBUF. When this field is different from zero, the
1663 * RX buffer size = RCTL.FLXBUF * 1K
1664 * (e.g. it is possible to specify an RX buffer size of 1,2,...,15KB).
1665 * It works fine on real HW, but for some reason doesn't work
1666 * on VMware emulated 82574L.
1667 * So for now, always use BSIZE/BSEX to setup RX buffer size.
1668 * If you don't plan to use it on VMware emulated 82574L and
1669 * would like to specify RX buffer size in 1K granularity,
1670 * uncomment the following lines:
1671 * ***************************************************************
1672 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
1673 * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
1674 * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
1675 * *bufsz = rctl_bsize;
1676 * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
1677 * E1000_RCTL_FLXBUF_MASK);
1679 * ***************************************************************
1682 for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
1684 if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
1685 *bufsz = bufsz_to_rctl[i].bufsz;
1686 return bufsz_to_rctl[i].rctl;
1690 /* Should never happen. */
1695 em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
1697 struct em_rx_entry *rxe = rxq->sw_ring;
1700 static const struct e1000_rx_desc rxd_init = {
1704 /* Initialize software ring entries */
1705 for (i = 0; i < rxq->nb_rx_desc; i++) {
1706 volatile struct e1000_rx_desc *rxd;
1707 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1710 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1711 "queue_id=%hu", rxq->queue_id);
1716 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
1718 /* Clear HW ring memory */
1719 rxq->rx_ring[i] = rxd_init;
1721 rxd = &rxq->rx_ring[i];
1722 rxd->buffer_addr = dma_addr;
1729 /*********************************************************************
1731 * Enable receive unit.
1733 **********************************************************************/
1735 eth_em_rx_init(struct rte_eth_dev *dev)
1737 struct e1000_hw *hw;
1738 struct em_rx_queue *rxq;
1739 struct rte_eth_rxmode *rxmode;
1743 uint32_t rctl_bsize;
1747 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1748 rxmode = &dev->data->dev_conf.rxmode;
1751 * Make sure receives are disabled while setting
1752 * up the descriptor ring.
1754 rctl = E1000_READ_REG(hw, E1000_RCTL);
1755 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1757 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1759 /* Disable extended descriptor type. */
1760 rfctl &= ~E1000_RFCTL_EXTEN;
1761 /* Disable accelerated acknowledge */
1762 if (hw->mac.type == e1000_82574)
1763 rfctl |= E1000_RFCTL_ACK_DIS;
1765 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1768 * XXX TEMPORARY WORKAROUND: on some systems with 82573
1769 * long latencies are observed, like Lenovo X60. This
1770 * change eliminates the problem, but since having positive
1771 * values in RDTR is a known source of problems on other
1772 * platforms another solution is being sought.
1774 if (hw->mac.type == e1000_82573)
1775 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
1777 dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
1779 /* Determine RX bufsize. */
1780 rctl_bsize = EM_MAX_BUF_SIZE;
1781 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1784 rxq = dev->data->rx_queues[i];
1785 buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
1786 RTE_PKTMBUF_HEADROOM;
1787 rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
1790 rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
1792 /* Configure and enable each RX queue. */
1793 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1797 rxq = dev->data->rx_queues[i];
1799 /* Allocate buffers for descriptor rings and setup queue */
1800 ret = em_alloc_rx_queue_mbufs(rxq);
1805 * Reset crc_len in case it was changed after queue setup by a
1808 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1809 rxq->crc_len = RTE_ETHER_CRC_LEN;
1813 bus_addr = rxq->rx_ring_phys_addr;
1814 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1816 sizeof(*rxq->rx_ring));
1817 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1818 (uint32_t)(bus_addr >> 32));
1819 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1821 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1822 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1824 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1825 rxdctl &= 0xFE000000;
1826 rxdctl |= rxq->pthresh & 0x3F;
1827 rxdctl |= (rxq->hthresh & 0x3F) << 8;
1828 rxdctl |= (rxq->wthresh & 0x3F) << 16;
1829 rxdctl |= E1000_RXDCTL_GRAN;
1830 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1833 * Due to EM devices not having any sort of hardware
1834 * limit for packet length, jumbo frames of any size
1835 * can be accepted, thus we have to enable scattered
1836 * rx if jumbo frames are enabled (or if buffer size
1837 * is too small to accommodate non-jumbo packets)
1838 * to avoid splitting packets that don't fit into
1841 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ||
1842 rctl_bsize < RTE_ETHER_MAX_LEN) {
1843 if (!dev->data->scattered_rx)
1844 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1846 (eth_rx_burst_t)eth_em_recv_scattered_pkts;
1847 dev->data->scattered_rx = 1;
1851 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
1852 if (!dev->data->scattered_rx)
1853 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
1854 dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
1855 dev->data->scattered_rx = 1;
1859 * Setup the Checksum Register.
1860 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1862 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1864 if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
1865 rxcsum |= E1000_RXCSUM_IPOFL;
1867 rxcsum &= ~E1000_RXCSUM_IPOFL;
1868 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1870 /* No MRQ or RSS support for now */
1872 /* Set early receive threshold on appropriate hw */
1873 if ((hw->mac.type == e1000_ich9lan ||
1874 hw->mac.type == e1000_pch2lan ||
1875 hw->mac.type == e1000_ich10lan) &&
1876 rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1877 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1878 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
1879 E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
1882 if (hw->mac.type == e1000_pch2lan) {
1883 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1884 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
1886 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
1889 /* Setup the Receive Control Register. */
1890 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1891 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1893 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1895 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1896 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1897 E1000_RCTL_RDMTS_HALF |
1898 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1900 /* Make sure VLAN Filters are off. */
1901 rctl &= ~E1000_RCTL_VFE;
1902 /* Don't store bad packets. */
1903 rctl &= ~E1000_RCTL_SBP;
1904 /* Legacy descriptor type. */
1905 rctl &= ~E1000_RCTL_DTYP_MASK;
1908 * Configure support of jumbo frames, if any.
1910 if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
1911 rctl |= E1000_RCTL_LPE;
1913 rctl &= ~E1000_RCTL_LPE;
1915 /* Enable Receives. */
1916 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1921 /*********************************************************************
1923 * Enable transmit unit.
1925 **********************************************************************/
1927 eth_em_tx_init(struct rte_eth_dev *dev)
1929 struct e1000_hw *hw;
1930 struct em_tx_queue *txq;
1935 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1937 /* Setup the Base and Length of the Tx Descriptor Rings. */
1938 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1941 txq = dev->data->tx_queues[i];
1942 bus_addr = txq->tx_ring_phys_addr;
1943 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1945 sizeof(*txq->tx_ring));
1946 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1947 (uint32_t)(bus_addr >> 32));
1948 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1950 /* Setup the HW Tx Head and Tail descriptor pointers. */
1951 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1952 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1954 /* Setup Transmit threshold registers. */
1955 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1957 * bit 22 is reserved; on some models it should always be 0,
1958 * on others always 1.
1960 txdctl &= E1000_TXDCTL_COUNT_DESC;
1961 txdctl |= txq->pthresh & 0x3F;
1962 txdctl |= (txq->hthresh & 0x3F) << 8;
1963 txdctl |= (txq->wthresh & 0x3F) << 16;
1964 txdctl |= E1000_TXDCTL_GRAN;
1965 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1968 /* Program the Transmit Control Register. */
1969 tctl = E1000_READ_REG(hw, E1000_TCTL);
1970 tctl &= ~E1000_TCTL_CT;
1971 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1972 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1974 /* SPT and CNP Si errata workaround to avoid data corruption */
1975 if (hw->mac.type == e1000_pch_spt) {
1977 reg_val = E1000_READ_REG(hw, E1000_IOSFPC);
1978 reg_val |= E1000_RCTL_RDMTS_HEX;
1979 E1000_WRITE_REG(hw, E1000_IOSFPC, reg_val);
1981 /* Dropping the number of outstanding requests from
1982 * 3 to 2 in order to avoid a buffer overrun.
1984 reg_val = E1000_READ_REG(hw, E1000_TARC(0));
1985 reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
1986 reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
1987 E1000_WRITE_REG(hw, E1000_TARC(0), reg_val);
1990 /* This write will effectively turn on the transmit unit. */
1991 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
1995 em_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
1996 struct rte_eth_rxq_info *qinfo)
1998 struct em_rx_queue *rxq;
2000 rxq = dev->data->rx_queues[queue_id];
2002 qinfo->mp = rxq->mb_pool;
2003 qinfo->scattered_rx = dev->data->scattered_rx;
2004 qinfo->nb_desc = rxq->nb_rx_desc;
2005 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2006 qinfo->conf.offloads = rxq->offloads;
2010 em_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2011 struct rte_eth_txq_info *qinfo)
2013 struct em_tx_queue *txq;
2015 txq = dev->data->tx_queues[queue_id];
2017 qinfo->nb_desc = txq->nb_tx_desc;
2019 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2020 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2021 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2022 qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
2023 qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh;
2024 qinfo->conf.offloads = txq->offloads;
2028 e1000_flush_tx_ring(struct rte_eth_dev *dev)
2030 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2031 volatile struct e1000_data_desc *tx_desc;
2032 volatile uint32_t *tdt_reg_addr;
2033 uint32_t tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
2034 uint16_t size = 512;
2035 struct em_tx_queue *txq;
2038 if (dev->data->tx_queues == NULL)
2040 tctl = E1000_READ_REG(hw, E1000_TCTL);
2041 E1000_WRITE_REG(hw, E1000_TCTL, tctl | E1000_TCTL_EN);
2042 for (i = 0; i < dev->data->nb_tx_queues &&
2043 i < E1000_I219_MAX_TX_QUEUE_NUM; i++) {
2044 txq = dev->data->tx_queues[i];
2045 tdt = E1000_READ_REG(hw, E1000_TDT(i));
2046 if (tdt != txq->tx_tail)
2048 tx_desc = &txq->tx_ring[txq->tx_tail];
2049 tx_desc->buffer_addr = rte_cpu_to_le_64(txq->tx_ring_phys_addr);
2050 tx_desc->lower.data = rte_cpu_to_le_32(txd_lower | size);
2051 tx_desc->upper.data = 0;
2055 if (txq->tx_tail == txq->nb_tx_desc)
2057 tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(i));
2058 E1000_PCI_REG_WRITE(tdt_reg_addr, txq->tx_tail);
2064 e1000_flush_rx_ring(struct rte_eth_dev *dev)
2066 uint32_t rctl, rxdctl;
2067 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2070 rctl = E1000_READ_REG(hw, E1000_RCTL);
2071 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2072 E1000_WRITE_FLUSH(hw);
2075 for (i = 0; i < dev->data->nb_rx_queues &&
2076 i < E1000_I219_MAX_RX_QUEUE_NUM; i++) {
2077 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2078 /* zero the lower 14 bits (prefetch and host thresholds) */
2079 rxdctl &= 0xffffc000;
2081 /* update thresholds: prefetch threshold to 31,
2082 * host threshold to 1 and make sure the granularity
2083 * is "descriptors" and not "cache lines"
2085 rxdctl |= (0x1F | (1UL << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
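		/*
		 * Same RXDCTL field layout as programmed in eth_em_rx_init():
		 * prefetch threshold in bits 5:0 (0x1F = 31), host threshold
		 * in bits 13:8 (1), with granularity in descriptor units.
		 */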
2087 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2089 /* momentarily enable the RX ring for the changes to take effect */
2090 E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
2091 E1000_WRITE_FLUSH(hw);
2093 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2097 * em_flush_desc_rings - remove all descriptors from the descriptor rings
2099 * In i219, the descriptor rings must be emptied before resetting/closing the
2100 * HW. Failure to do this will cause the HW to enter a unit hang state which
2101 * can only be released by a PCI reset of the device
2106 em_flush_desc_rings(struct rte_eth_dev *dev)
2108 uint32_t fextnvm11, tdlen;
2109 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2110 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2111 uint16_t pci_cfg_status = 0;
2114 fextnvm11 = E1000_READ_REG(hw, E1000_FEXTNVM11);
2115 E1000_WRITE_REG(hw, E1000_FEXTNVM11,
2116 fextnvm11 | E1000_FEXTNVM11_DISABLE_MULR_FIX);
2117 tdlen = E1000_READ_REG(hw, E1000_TDLEN(0));
2118 ret = rte_pci_read_config(pci_dev, &pci_cfg_status,
2119 sizeof(pci_cfg_status), PCI_CFG_STATUS_REG);
2121 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
2122 PCI_CFG_STATUS_REG);
2126 /* do nothing if we're not in a faulty state or if the queue is empty */
2127 if ((pci_cfg_status & FLUSH_DESC_REQUIRED) && tdlen) {
2128 /* flush desc ring */
2129 e1000_flush_tx_ring(dev);
2130 ret = rte_pci_read_config(pci_dev, &pci_cfg_status,
2131 sizeof(pci_cfg_status), PCI_CFG_STATUS_REG);
2133 PMD_DRV_LOG(ERR, "Failed to read PCI offset 0x%x",
2134 PCI_CFG_STATUS_REG);
2138 if (pci_cfg_status & FLUSH_DESC_REQUIRED)
2139 e1000_flush_rx_ring(dev);