/*-
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "e1000/e1000_api.h"
#include "e1000_ethdev.h"
#include "e1000/e1000_osdep.h"
#define E1000_TXD_VLAN_SHIFT	16

#define E1000_RXDCTL_GRAN	0x01000000 /* RXDCTL Granularity */
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return (m);
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
	(uint64_t) ((mb)->buf_physaddr + \
	(uint64_t) ((char *)((mb)->data) - (char *)(mb)->buf_addr))

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
	(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
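/*
 * Illustrative note: for a freshly allocated mbuf whose data pointer
 * still sits at the default headroom offset (i.e. it has not been moved
 * by rte_pktmbuf_prepend()/rte_pktmbuf_adj()), both macros yield the
 * same bus address:
 *
 *	uint64_t dma = RTE_MBUF_DATA_DMA_ADDR(m);
 *	// == (m)->buf_physaddr + RTE_PKTMBUF_HEADROOM
 */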
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct em_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct em_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each RX queue.
 */
struct em_rx_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
	struct em_rx_entry *sw_ring; /**< address of RX software ring. */
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of RDT register. */
	uint16_t nb_rx_hold; /**< number of held free RX desc. */
	uint16_t rx_free_thresh; /**< max free RX desc to hold. */
	uint16_t queue_id; /**< RX queue index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold register. */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
};
/**
 * Hardware context number
 */
enum {
	EM_CTX_0 = 0, /**< CTX0 */
	EM_CTX_NUM = 1, /**< CTX NUM */
};
/** Offload features */
union em_vlan_macip {
	uint32_t data;
	struct {
		uint16_t l3_len:9; /**< L3 (IP) Header Length. */
		uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
		uint16_t vlan_tci;
		/**< VLAN Tag Control Identifier (CPU order). */
	} f;
};

/*
 * Compare mask for vlan_macip_len.data,
 * should be in sync with em_vlan_macip.f layout.
 */
#define TX_VLAN_CMP_MASK	0xFFFF0000 /**< VLAN length - 16-bits. */
#define TX_MAC_LEN_CMP_MASK	0x0000FE00 /**< MAC length - 7-bits. */
#define TX_IP_LEN_CMP_MASK	0x000001FF /**< IP length - 9-bits. */
/** MAC+IP length. */
#define TX_MACIP_LEN_CMP_MASK	(TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
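/*
 * For reference, the layout of em_vlan_macip.data that these masks
 * assume (bit 0 rightmost):
 *
 *	 31            16 15       9 8         0
 *	+----------------+----------+-----------+
 *	|    vlan_tci    |  l2_len  |  l3_len   |
 *	+----------------+----------+-----------+
 *
 * e.g. TX_MACIP_LEN_CMP_MASK == 0x0000FFFF, covering both length fields.
 */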
/**
 * Structure to check if a new context descriptor needs to be built.
 */
struct em_ctx_info {
	uint16_t flags; /**< ol_flags related to context build. */
	uint32_t cmp_mask; /**< compare mask */
	union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
/**
 * Structure associated with each TX queue.
 */
struct em_tx_queue {
	volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
	struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
	volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
	uint16_t nb_tx_desc; /**< number of TX descriptors. */
	uint16_t tx_tail; /**< Current value of TDT register. */
	uint16_t tx_free_thresh; /**< minimum TX before freeing. */
	/** Number of TX descriptors to use before RS bit is set. */
	uint16_t tx_rs_thresh;
	/** Number of TX descriptors used since RS bit was set. */
	uint16_t nb_tx_used;
	/** Index to last TX descriptor to have been cleaned. */
	uint16_t last_desc_cleaned;
	/** Total number of TX descriptors ready to be allocated. */
	uint16_t nb_tx_free;
	uint16_t queue_id; /**< TX queue index. */
	uint8_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold register. */
	struct em_ctx_info ctx_cache; /**< Hardware context history. */
};
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_em_prefetch(p)	rte_prefetch0(p)
#else
#define rte_em_prefetch(p)	do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p)	rte_prefetch1(p)
#else
#define rte_packet_prefetch(p)	do {} while (0)
#endif

#ifndef DEFAULT_TX_FREE_THRESH
#define DEFAULT_TX_FREE_THRESH  32
#endif /* DEFAULT_TX_FREE_THRESH */

#ifndef DEFAULT_TX_RS_THRESH
#define DEFAULT_TX_RS_THRESH  32
#endif /* DEFAULT_TX_RS_THRESH */
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

/*
 * Populates TX context descriptor.
 */
static inline void
em_set_xmit_ctx(struct em_tx_queue* txq,
		volatile struct e1000_context_desc *ctx_txd,
		uint16_t flags,
		union em_vlan_macip hdrlen)
{
	uint32_t cmp_mask, cmd_len;
	uint16_t ipcse, l2len;
	struct e1000_context_desc ctx;

	cmp_mask = 0;
	cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;

	l2len = hdrlen.f.l2_len;
	ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);

	/* setup IPCS* fields */
	ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
	ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
			offsetof(struct ipv4_hdr, hdr_checksum));

	/*
	 * When doing checksum or TCP segmentation with IPv6 headers,
	 * IPCSE field should be set to 0.
	 */
	if (flags & PKT_TX_IP_CKSUM) {
		ctx.lower_setup.ip_fields.ipcse =
			(uint16_t)rte_cpu_to_le_16(ipcse - 1);
		cmd_len |= E1000_TXD_CMD_IP;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
	} else {
		ctx.lower_setup.ip_fields.ipcse = 0;
	}

	/* setup TUCS* fields */
	ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
	ctx.upper_setup.tcp_fields.tucse = 0;

	switch (flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
				offsetof(struct udp_hdr, dgram_cksum));
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	case PKT_TX_TCP_CKSUM:
		ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
				offsetof(struct tcp_hdr, cksum));
		cmd_len |= E1000_TXD_CMD_TCP;
		cmp_mask |= TX_MACIP_LEN_CMP_MASK;
		break;
	default:
		ctx.upper_setup.tcp_fields.tucso = 0;
		break;
	}

	ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
	ctx.tcp_seg_setup.data = 0;

	*ctx_txd = ctx;

	txq->ctx_cache.flags = flags;
	txq->ctx_cache.cmp_mask = cmp_mask;
	txq->ctx_cache.hdrlen = hdrlen;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
		union em_vlan_macip hdrlen)
{
	/* If it matches the current context */
	if (likely (txq->ctx_cache.flags == flags &&
			((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
			txq->ctx_cache.cmp_mask) == 0))
		return (EM_CTX_0);

	/* Mismatch */
	return (EM_CTX_NUM);
}
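/*
 * Note: since EM_CTX_NUM == 1, only the most recently programmed context
 * is cached; a return value of EM_CTX_NUM therefore means "no usable
 * match, a new context descriptor must be written" (see new_ctx below).
 */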
/* Reset transmit descriptors after they have been used */
static inline int
em_xmit_cleanup(struct em_tx_queue *txq)
{
	struct em_tx_entry *sw_ring = txq->sw_ring;
	volatile struct e1000_data_desc *txr = txq->tx_ring;
	uint16_t last_desc_cleaned = txq->last_desc_cleaned;
	uint16_t nb_tx_desc = txq->nb_tx_desc;
	uint16_t desc_to_clean_to;
	uint16_t nb_tx_to_clean;

	/* Determine the last descriptor needing to be cleaned */
	desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
	if (desc_to_clean_to >= nb_tx_desc)
		desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);

	/* Check to make sure the last descriptor to clean is done */
	desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
	if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
	{
		PMD_TX_FREE_LOG(DEBUG,
				"TX descriptor %4u is not done "
				"(port=%d queue=%d)",
				desc_to_clean_to,
				txq->port_id, txq->queue_id);
		/* Failed to clean any descriptors, better luck next time */
		return -(1);
	}

	/* Figure out how many descriptors will be cleaned */
	if (last_desc_cleaned > desc_to_clean_to)
		nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
							desc_to_clean_to);
	else
		nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
						last_desc_cleaned);

	PMD_TX_FREE_LOG(DEBUG,
			"Cleaning %4u TX descriptors: %4u to %4u "
			"(port=%d queue=%d)",
			nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
			txq->port_id, txq->queue_id);

	/*
	 * The last descriptor to clean is done, so that means all the
	 * descriptors from the last descriptor that was cleaned
	 * up to the last descriptor with the RS bit set
	 * are done. Only reset the threshold descriptor.
	 */
	txr[desc_to_clean_to].upper.fields.status = 0;

	/* Update the txq to reflect the last descriptor that was cleaned */
	txq->last_desc_cleaned = desc_to_clean_to;
	txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);

	/* No Error */
	return (0);
}
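/*
 * Worked example of the wrap-around arithmetic above: with
 * nb_tx_desc = 512, tx_rs_thresh = 32 and last_desc_cleaned = 500,
 * desc_to_clean_to wraps to (500 + 32) - 512 = 20 and, assuming
 * sw_ring[20].last_id == 20, nb_tx_to_clean = (512 - 500) + 20 = 32.
 */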
static inline uint32_t
tx_desc_cksum_flags_to_upper(uint16_t ol_flags)
{
	static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
	static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
	uint32_t tmp;

	tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
	tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
	return (tmp);
}
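/*
 * The lookup tables above avoid branches: each comparison evaluates to
 * 0 or 1 and indexes the table directly. For example, an ol_flags value
 * of (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM) yields
 * (E1000_TXD_POPTS_TXSM | E1000_TXD_POPTS_IXSM) << 8.
 */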
uint16_t
eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct em_tx_queue *txq;
	struct em_tx_entry *sw_ring;
	struct em_tx_entry *txe, *txn;
	volatile struct e1000_data_desc *txr;
	volatile struct e1000_data_desc *txd;
	struct rte_mbuf *tx_pkt;
	struct rte_mbuf *m_seg;
	uint64_t buf_dma_addr;
	uint32_t popts_spec;
	uint32_t cmd_type_len;
	uint16_t slen;
	uint16_t ol_flags;
	uint16_t tx_id;
	uint16_t tx_last;
	uint16_t nb_tx;
	uint16_t nb_used;
	uint16_t tx_ol_req;
	uint32_t ctx;
	uint32_t new_ctx;
	union em_vlan_macip hdrlen;

	txq = tx_queue;
	sw_ring = txq->sw_ring;
	txr = txq->tx_ring;
	tx_id = txq->tx_tail;
	txe = &sw_ring[tx_id];

	/* Determine if the descriptor ring needs to be cleaned. */
	if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
		em_xmit_cleanup(txq);
	}

	/* TX loop */
	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		new_ctx = 0;
		tx_pkt = *tx_pkts++;

		RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

		/*
		 * Determine how many (if any) context descriptors
		 * are needed for offload functionality.
		 */
		ol_flags = tx_pkt->ol_flags;

		/* If hardware offload required */
		tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
							PKT_TX_L4_MASK));
		if (tx_ol_req) {
			hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
			hdrlen.f.l2_len = tx_pkt->l2_len;
			hdrlen.f.l3_len = tx_pkt->l3_len;
			/* Decide whether a new context descriptor is needed,
			 * or whether the existing one can be reused. */
			ctx = what_ctx_update(txq, tx_ol_req, hdrlen);

			/* Only allocate context descriptor if required */
			new_ctx = (ctx == EM_CTX_NUM);
		}

		/*
		 * Keep track of how many descriptors are used this loop.
		 * This will always be the number of segments + the number of
		 * Context descriptors required to transmit the packet.
		 */
		nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);

		/*
		 * The number of descriptors that must be allocated for a
		 * packet is the number of segments of that packet, plus 1
		 * Context Descriptor for the hardware offload, if any.
		 * Determine the last TX descriptor to allocate in the TX ring
		 * for the packet, starting from the current position (tx_id)
		 * in the ring.
		 */
		tx_last = (uint16_t) (tx_id + nb_used - 1);

		/* Circular ring */
		if (tx_last >= txq->nb_tx_desc)
			tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

		PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
			   " tx_first=%u tx_last=%u\n",
			   (unsigned) txq->port_id,
			   (unsigned) txq->queue_id,
			   (unsigned) tx_pkt->pkt_len,
			   (unsigned) tx_id,
			   (unsigned) tx_last);

		/*
		 * Make sure there are enough TX descriptors available to
		 * transmit the entire packet.
		 * nb_used better be less than or equal to txq->tx_rs_thresh
		 */
		while (unlikely (nb_used > txq->nb_tx_free)) {
			PMD_TX_FREE_LOG(DEBUG,
					"Not enough free TX descriptors "
					"nb_used=%4u nb_free=%4u "
					"(port=%d queue=%d)",
					nb_used, txq->nb_tx_free,
					txq->port_id, txq->queue_id);

			if (em_xmit_cleanup(txq) != 0) {
				/* Could not clean any descriptors */
				if (nb_tx == 0)
					return (0);
				goto end_of_tx;
			}
		}

		/*
		 * By now there are enough free TX descriptors to transmit
		 * the packet.
		 */

		/*
		 * Set common flags of all TX Data Descriptors.
		 *
		 * The following bits must be set in all Data Descriptors:
		 *    - E1000_TXD_DTYP_DATA
		 *    - E1000_TXD_DTYP_DEXT
		 *
		 * The following bits must be set in the first Data Descriptor
		 * and are ignored in the other ones:
		 *    - E1000_TXD_POPTS_IXSM
		 *    - E1000_TXD_POPTS_TXSM
		 *
		 * The following bits must be set in the last Data Descriptor
		 * and are ignored in the other ones:
		 *    - E1000_TXD_CMD_VLE
		 *    - E1000_TXD_CMD_IFCS
		 *
		 * The following bits must only be set in the last Data
		 * Descriptor:
		 *    - E1000_TXD_CMD_EOP
		 *
		 * The following bits can be set in any Data Descriptor, but
		 * are only set in the last Data Descriptor:
		 *    - E1000_TXD_CMD_RS
		 */
		cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
			E1000_TXD_CMD_IFCS;
		popts_spec = 0;

		/* Set VLAN Tag offload fields. */
		if (ol_flags & PKT_TX_VLAN_PKT) {
			cmd_type_len |= E1000_TXD_CMD_VLE;
			popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
		}

		if (tx_ol_req) {
			/*
			 * Setup the TX Context Descriptor if required
			 */
			if (new_ctx) {
				volatile struct e1000_context_desc *ctx_txd;

				ctx_txd = (volatile struct e1000_context_desc *)
					&txr[tx_id];

				txn = &sw_ring[txe->next_id];
				RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

				if (txe->mbuf != NULL) {
					rte_pktmbuf_free_seg(txe->mbuf);
					txe->mbuf = NULL;
				}

				em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
					hdrlen);

				txe->last_id = tx_last;
				tx_id = txe->next_id;
				txe = txn;
			}

			/*
			 * Setup the TX Data Descriptor.
			 * This path is taken whether a new context descriptor
			 * was just written or an existing one is reused.
			 */
			popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
		}

		m_seg = tx_pkt;
		do {
			txd = &txr[tx_id];
			txn = &sw_ring[txe->next_id];

			if (txe->mbuf != NULL)
				rte_pktmbuf_free_seg(txe->mbuf);
			txe->mbuf = m_seg;

			/*
			 * Set up Transmit Data Descriptor.
			 */
			slen = m_seg->data_len;
			buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);

			txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
			txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
			txd->upper.data = rte_cpu_to_le_32(popts_spec);

			txe->last_id = tx_last;
			tx_id = txe->next_id;
			txe = txn;
			m_seg = m_seg->next;
		} while (m_seg != NULL);

		/*
		 * The last packet data descriptor needs End Of Packet (EOP).
		 */
		cmd_type_len |= E1000_TXD_CMD_EOP;
		txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
		txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);

		/* Set RS bit only on threshold packets' last descriptor */
		if (txq->nb_tx_used >= txq->tx_rs_thresh) {
			PMD_TX_FREE_LOG(DEBUG,
					"Setting RS bit on TXD id="
					"%4u (port=%d queue=%d)",
					tx_last, txq->port_id, txq->queue_id);

			cmd_type_len |= E1000_TXD_CMD_RS;

			/* Update txq RS bit counters */
			txq->nb_tx_used = 0;
		}
		txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
	}

	/* End of TX loop */
 end_of_tx:
	rte_wmb();

	/*
	 * Set the Transmit Descriptor Tail (TDT)
	 */
	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
		(unsigned) txq->port_id, (unsigned) txq->queue_id,
		(unsigned) tx_id, (unsigned) nb_tx);
	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
	txq->tx_tail = tx_id;

	return (nb_tx);
}
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
	uint16_t pkt_flags;

	/* Check if VLAN present */
	pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
				PKT_RX_VLAN_PKT : 0);

	return pkt_flags;
}

static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_error)
{
	uint16_t pkt_flags = 0;

	if (rx_error & E1000_RXD_ERR_IPE)
		pkt_flags |= PKT_RX_IP_CKSUM_BAD;
	if (rx_error & E1000_RXD_ERR_TCPE)
		pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	return (pkt_flags);
}
uint16_t
eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	volatile struct e1000_rx_desc *rx_ring;
	volatile struct e1000_rx_desc *rxdp;
	struct em_rx_queue *rxq;
	struct em_rx_entry *sw_ring;
	struct em_rx_entry *rxe;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	struct e1000_rx_desc rxd;
	uint64_t dma_addr;
	uint16_t pkt_len;
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint8_t status;

	rxq = rx_queue;

	nb_rx = 0;
	nb_hold = 0;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;
	while (nb_rx < nb_pkts) {
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		status = rxdp->status;
		if (! (status & E1000_RXD_STAT_DD))
			break;
		rxd = *rxdp;

		/*
		 * End of packet.
		 *
		 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
		 * likely to be invalid and to be dropped by the various
		 * validation checks performed by the network stack.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
			"status=0x%x pkt_len=%u\n",
			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			(unsigned) rx_id, (unsigned) status,
			(unsigned) rte_le_to_cpu_16(rxd.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				"queue_id=%u\n",
				(unsigned) rxq->port_id,
				(unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_em_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_em_prefetch(&rx_ring[rx_id]);
			rte_em_prefetch(&sw_ring[rx_id]);
		}

		/* Rearm RXD: attach new mbuf and reset status to zero. */

		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma_addr =
			rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->buffer_addr = dma_addr;
		rxdp->status = 0;

		/*
		 * Initialize the returned mbuf.
		 * 1) setup generic mbuf fields:
		 *    - number of segments,
		 *    - next segment,
		 *    - packet length,
		 *    - RX port identifier.
		 * 2) integrate hardware offload data, if any:
		 *    - IP checksum flag,
		 *    - VLAN TCI, if any,
		 *    - error flags.
		 */
		pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
				rxq->crc_len);
		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
		rte_packet_prefetch(rxm->data);
		rxm->nb_segs = 1;
		rxm->next = NULL;
		rxm->pkt_len = pkt_len;
		rxm->data_len = pkt_len;
		rxm->port = rxq->port_id;

		rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
		rxm->ol_flags = (uint16_t)(rxm->ol_flags |
				rx_desc_error_to_pkt_flags(rxd.errors));

		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rxm;
	}
	rxq->rx_tail = rx_id;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
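	/*
	 * For example, if the last descriptor handed back to the
	 * application was index 0 and nb_rx_desc = 256, the code below
	 * programs RDT = 255 rather than 0.
	 */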
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			"nb_hold=%u nb_rx=%u\n",
			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			(unsigned) rx_id, (unsigned) nb_hold,
			(unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
uint16_t
eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct em_rx_queue *rxq;
	volatile struct e1000_rx_desc *rx_ring;
	volatile struct e1000_rx_desc *rxdp;
	struct em_rx_entry *sw_ring;
	struct em_rx_entry *rxe;
	struct rte_mbuf *first_seg;
	struct rte_mbuf *last_seg;
	struct rte_mbuf *rxm;
	struct rte_mbuf *nmb;
	struct e1000_rx_desc rxd;
	uint64_t dma; /* Physical address of mbuf data buffer */
	uint16_t rx_id;
	uint16_t nb_rx;
	uint16_t nb_hold;
	uint16_t data_len;
	uint8_t status;

	rxq = rx_queue;

	nb_rx = 0;
	nb_hold = 0;
	rx_id = rxq->rx_tail;
	rx_ring = rxq->rx_ring;
	sw_ring = rxq->sw_ring;

	/*
	 * Retrieve RX context of current packet, if any.
	 */
	first_seg = rxq->pkt_first_seg;
	last_seg = rxq->pkt_last_seg;

	while (nb_rx < nb_pkts) {
	next_desc:
		/*
		 * The order of operations here is important as the DD status
		 * bit must not be read after any other descriptor fields.
		 * rx_ring and rxdp are pointing to volatile data so the order
		 * of accesses cannot be reordered by the compiler. If they were
		 * not volatile, they could be reordered which could lead to
		 * using invalid descriptor fields when read from rxd.
		 */
		rxdp = &rx_ring[rx_id];
		status = rxdp->status;
		if (! (status & E1000_RXD_STAT_DD))
			break;
		rxd = *rxdp;

		/*
		 * Descriptor done.
		 *
		 * Allocate a new mbuf to replenish the RX ring descriptor.
		 * If the allocation fails:
		 *    - arrange for that RX descriptor to be the first one
		 *      being parsed the next time the receive function is
		 *      invoked [on the same queue].
		 *
		 *    - Stop parsing the RX ring and return immediately.
		 *
		 * This policy does not drop the packet received in the RX
		 * descriptor for which the allocation of a new mbuf failed.
		 * Thus, it allows that packet to be later retrieved if
		 * mbufs have been freed in the meantime.
		 * As a side effect, holding RX descriptors instead of
		 * systematically giving them back to the NIC may lead to
		 * RX ring exhaustion situations.
		 * However, the NIC can gracefully prevent such situations
		 * from happening by sending specific "back-pressure" flow
		 * control frames to its peer(s).
		 */
		PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
			"status=0x%x data_len=%u\n",
			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			(unsigned) rx_id, (unsigned) status,
			(unsigned) rte_le_to_cpu_16(rxd.length));

		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
		if (nmb == NULL) {
			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
				"queue_id=%u\n", (unsigned) rxq->port_id,
				(unsigned) rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		nb_hold++;
		rxe = &sw_ring[rx_id];
		rx_id++;
		if (rx_id == rxq->nb_rx_desc)
			rx_id = 0;

		/* Prefetch next mbuf while processing current one. */
		rte_em_prefetch(sw_ring[rx_id].mbuf);

		/*
		 * When next RX descriptor is on a cache-line boundary,
		 * prefetch the next 4 RX descriptors and the next 8 pointers
		 * to mbufs.
		 */
		if ((rx_id & 0x3) == 0) {
			rte_em_prefetch(&rx_ring[rx_id]);
			rte_em_prefetch(&sw_ring[rx_id]);
		}

		/*
		 * Update RX descriptor with the physical address of the new
		 * data buffer of the new allocated mbuf.
		 */
		rxm = rxe->mbuf;
		rxe->mbuf = nmb;
		dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
		rxdp->buffer_addr = dma;
		rxdp->status = 0;

		/*
		 * Set data length & data buffer address of mbuf.
		 */
		data_len = rte_le_to_cpu_16(rxd.length);
		rxm->data_len = data_len;
		rxm->data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

		/*
		 * If this is the first buffer of the received packet,
		 * set the pointer to the first mbuf of the packet and
		 * initialize its context.
		 * Otherwise, update the total length and the number of segments
		 * of the current scattered packet, and update the pointer to
		 * the last mbuf of the current packet.
		 */
		if (first_seg == NULL) {
			first_seg = rxm;
			first_seg->pkt_len = data_len;
			first_seg->nb_segs = 1;
		} else {
			first_seg->pkt_len += data_len;
			first_seg->nb_segs++;
			last_seg->next = rxm;
		}

		/*
		 * If this is not the last buffer of the received packet,
		 * update the pointer to the last mbuf of the current scattered
		 * packet and continue to parse the RX ring.
		 */
		if (! (status & E1000_RXD_STAT_EOP)) {
			last_seg = rxm;
			goto next_desc;
		}

		/*
		 * This is the last buffer of the received packet.
		 * If the CRC is not stripped by the hardware:
		 *  - Subtract the CRC length from the total packet length.
		 *  - If the last buffer only contains the whole CRC or a part
		 *    of it, free the mbuf associated to the last buffer.
		 *    If part of the CRC is also contained in the previous
		 *    mbuf, subtract the length of that CRC part from the
		 *    data length of the previous mbuf.
		 */
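		/*
		 * Worked example, with ETHER_CRC_LEN = 4: if the last buffer
		 * holds only data_len = 2 bytes, those bytes are pure CRC, so
		 * the buffer is freed and the remaining 4 - 2 = 2 CRC bytes
		 * are subtracted from the previous segment's data_len.
		 */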
		rxm->next = NULL;
		if (unlikely(rxq->crc_len > 0)) {
			first_seg->pkt_len -= ETHER_CRC_LEN;
			if (data_len <= ETHER_CRC_LEN) {
				rte_pktmbuf_free_seg(rxm);
				first_seg->nb_segs--;
				last_seg->data_len = (uint16_t)
					(last_seg->data_len -
					 (ETHER_CRC_LEN - data_len));
				last_seg->next = NULL;
			} else
				rxm->data_len =
					(uint16_t) (data_len - ETHER_CRC_LEN);
		}

		/*
		 * Initialize the first mbuf of the returned packet:
		 *    - RX port identifier,
		 *    - hardware offload data, if any:
		 *      - IP checksum flag,
		 *      - VLAN TCI, if any,
		 *      - error flags.
		 */
		first_seg->port = rxq->port_id;

		first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
		first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
					rx_desc_error_to_pkt_flags(rxd.errors));

		/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
		first_seg->vlan_tci = rte_le_to_cpu_16(rxd.special);

		/* Prefetch data of first segment, if configured to do so. */
		rte_packet_prefetch(first_seg->data);

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = first_seg;

		/*
		 * Setup receipt context for a new packet.
		 */
		first_seg = NULL;
	}

	/*
	 * Record index of the next RX descriptor to probe.
	 */
	rxq->rx_tail = rx_id;

	/*
	 * Save receive context.
	 */
	rxq->pkt_first_seg = first_seg;
	rxq->pkt_last_seg = last_seg;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			"nb_hold=%u nb_rx=%u\n",
			(unsigned) rxq->port_id, (unsigned) rxq->queue_id,
			(unsigned) rx_id, (unsigned) nb_hold,
			(unsigned) nb_rx);
		rx_id = (uint16_t) ((rx_id == 0) ?
			(rxq->nb_rx_desc - 1) : (rx_id - 1));
		E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;
	return (nb_rx);
}
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define EM_ALIGN 128

/**
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define EM_MIN_RING_DESC 32
#define EM_MAX_RING_DESC 4096
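/*
 * E.g. with 16-byte legacy descriptors (sizeof(struct e1000_rx_desc)),
 * 128 / 16 = 8, so the descriptor count must be a multiple of 8; both
 * bounds above satisfy this.
 */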
#define EM_MAX_BUF_SIZE     16384
#define EM_RCTL_FLXBUF_STEP 1024
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	const struct rte_memzone *mz;
	char z_name[RTE_MEMZONE_NAMESIZE];

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		dev->driver->pci_drv.name, ring_name, dev->data->port_id,
		queue_id);

	if ((mz = rte_memzone_lookup(z_name)) != 0)
		return (mz);

#ifdef RTE_LIBRTE_XEN_DOM0
	return rte_memzone_reserve_bounded(z_name, ring_size,
			socket_id, 0, CACHE_LINE_SIZE, RTE_PGSIZE_2M);
#else
	return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
#endif
}
static void
em_tx_queue_release_mbufs(struct em_tx_queue *txq)
{
	unsigned i;

	if (txq->sw_ring != NULL) {
		for (i = 0; i != txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
em_tx_queue_release(struct em_tx_queue *txq)
{
	if (txq != NULL) {
		em_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
eth_em_tx_queue_release(void *txq)
{
	em_tx_queue_release(txq);
}
/* (Re)set dynamic em_tx_queue fields to defaults */
static void
em_reset_tx_queue(struct em_tx_queue *txq)
{
	uint16_t i, nb_desc, prev;
	static const struct e1000_data_desc txd_init = {
		.upper.fields = {.status = E1000_TXD_STAT_DD},
	};

	nb_desc = txq->nb_tx_desc;

	/* Initialize ring entries */

	prev = (uint16_t) (nb_desc - 1);

	for (i = 0; i < nb_desc; i++) {
		txq->tx_ring[i] = txd_init;
		txq->sw_ring[i].mbuf = NULL;
		txq->sw_ring[i].last_id = i;
		txq->sw_ring[prev].next_id = i;
		prev = i;
	}

	/*
	 * Always allow 1 descriptor to be un-allocated to avoid
	 * a H/W race condition
	 */
	txq->nb_tx_free = (uint16_t)(nb_desc - 1);
	txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
	txq->nb_tx_used = 0;
	txq->tx_tail = 0;

	memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
}
int
eth_em_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx,
			 uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct em_tx_queue *txq;
	struct e1000_hw *hw;
	uint32_t tsize;
	uint16_t tx_rs_thresh, tx_free_thresh;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of transmit descriptors.
	 * It must not exceed hardware maximum, and the ring size in bytes
	 * must be a multiple of EM_ALIGN.
	 */
	if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
			(nb_desc > EM_MAX_RING_DESC) ||
			(nb_desc < EM_MIN_RING_DESC)) {
		return -(EINVAL);
	}

	tx_free_thresh = tx_conf->tx_free_thresh;
	if (tx_free_thresh == 0)
		tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
					DEFAULT_TX_FREE_THRESH);

	tx_rs_thresh = tx_conf->tx_rs_thresh;
	if (tx_rs_thresh == 0)
		tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
					DEFAULT_TX_RS_THRESH);

	if (tx_free_thresh >= (nb_desc - 3)) {
		RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
			"number of TX descriptors minus 3. (tx_free_thresh=%u "
			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}
	if (tx_rs_thresh > tx_free_thresh) {
		RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
			"tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
			"port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
			(unsigned int)tx_rs_thresh, (int)dev->data->port_id,
			(int)queue_idx);
		return -(EINVAL);
	}

	/*
	 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
	 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
	 * by the NIC and all descriptors are written back after the NIC
	 * accumulates WTHRESH descriptors.
	 */
	if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
		RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
			"tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
			"port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		em_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
	if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
			socket_id)) == NULL)
		return (-ENOMEM);

	/* Allocate the tx queue data structure. */
	if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
			CACHE_LINE_SIZE)) == NULL)
		return (-ENOMEM);

	/* Allocate software ring */
	if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
			sizeof(txq->sw_ring[0]) * nb_desc,
			CACHE_LINE_SIZE)) == NULL) {
		em_tx_queue_release(txq);
		return (-ENOMEM);
	}

	txq->nb_tx_desc = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->tx_rs_thresh = tx_rs_thresh;
	txq->pthresh = tx_conf->tx_thresh.pthresh;
	txq->hthresh = tx_conf->tx_thresh.hthresh;
	txq->wthresh = tx_conf->tx_thresh.wthresh;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;

	txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
#else
	txq->tx_ring_phys_addr = rte_mem_phy2mch(tz->memseg_id, tz->phys_addr);
#endif
	txq->tx_ring = (struct e1000_data_desc *) tz->addr;

	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

	em_reset_tx_queue(txq);

	dev->data->tx_queues[queue_idx] = txq;
	return (0);
}
static void
em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
{
	unsigned i;

	if (rxq->sw_ring != NULL) {
		for (i = 0; i != rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

static void
em_rx_queue_release(struct em_rx_queue *rxq)
{
	if (rxq != NULL) {
		em_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

void
eth_em_rx_queue_release(void *rxq)
{
	em_rx_queue_release(rxq);
}
/* Reset dynamic em_rx_queue fields back to defaults */
static void
em_reset_rx_queue(struct em_rx_queue *rxq)
{
	rxq->rx_tail = 0;
	rxq->nb_rx_hold = 0;
	rxq->pkt_first_seg = NULL;
	rxq->pkt_last_seg = NULL;
}
int
eth_em_rx_queue_setup(struct rte_eth_dev *dev,
		uint16_t queue_idx,
		uint16_t nb_desc,
		unsigned int socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	const struct rte_memzone *rz;
	struct em_rx_queue *rxq;
	struct e1000_hw *hw;
	uint32_t rsize;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Validate number of receive descriptors.
	 * It must not exceed hardware maximum, and the ring size in bytes
	 * must be a multiple of EM_ALIGN.
	 */
	if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
			(nb_desc > EM_MAX_RING_DESC) ||
			(nb_desc < EM_MIN_RING_DESC)) {
		return (-EINVAL);
	}

	/*
	 * EM devices don't support drop_en functionality
	 */
	if (rx_conf->rx_drop_en) {
		RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");
		return (-EINVAL);
	}

	/* Free memory prior to re-allocation if needed. */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		em_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocate RX ring for max possible number of hardware descriptors. */
	rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
	if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
			socket_id)) == NULL)
		return (-ENOMEM);

	/* Allocate the RX queue data structure. */
	if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
			CACHE_LINE_SIZE)) == NULL)
		return (-ENOMEM);

	/* Allocate software ring. */
	if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
			sizeof (rxq->sw_ring[0]) * nb_desc,
			CACHE_LINE_SIZE)) == NULL) {
		em_rx_queue_release(rxq);
		return (-ENOMEM);
	}

	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->pthresh = rx_conf->rx_thresh.pthresh;
	rxq->hthresh = rx_conf->rx_thresh.hthresh;
	rxq->wthresh = rx_conf->rx_thresh.wthresh;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
				0 : ETHER_CRC_LEN);

	rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
	rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
#ifndef RTE_LIBRTE_XEN_DOM0
	rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
#else
	rxq->rx_ring_phys_addr = rte_mem_phy2mch(rz->memseg_id, rz->phys_addr);
#endif
	rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;

	PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
		rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

	dev->data->rx_queues[queue_idx] = rxq;
	em_reset_rx_queue(rxq);

	return (0);
}
uint32_t
eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
#define EM_RXQ_SCAN_INTERVAL 4
	volatile struct e1000_rx_desc *rxdp;
	struct em_rx_queue *rxq;
	uint32_t desc = 0;

	if (rx_queue_id >= dev->data->nb_rx_queues) {
		PMD_RX_LOG(DEBUG, "Invalid RX queue_id=%d\n", rx_queue_id);
		return 0;
	}

	rxq = dev->data->rx_queues[rx_queue_id];
	rxdp = &(rxq->rx_ring[rxq->rx_tail]);

	while ((desc < rxq->nb_rx_desc) &&
		(rxdp->status & E1000_RXD_STAT_DD)) {
		desc += EM_RXQ_SCAN_INTERVAL;
		rxdp += EM_RXQ_SCAN_INTERVAL;
		if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
			rxdp = &(rxq->rx_ring[rxq->rx_tail +
					desc - rxq->nb_rx_desc]);
	}

	return desc;
}
int
eth_em_rx_descriptor_done(void *rx_queue, uint16_t offset)
{
	volatile struct e1000_rx_desc *rxdp;
	struct em_rx_queue *rxq = rx_queue;
	uint32_t desc;

	if (unlikely(offset >= rxq->nb_rx_desc))
		return 0;
	desc = rxq->rx_tail + offset;
	if (desc >= rxq->nb_rx_desc)
		desc -= rxq->nb_rx_desc;

	rxdp = &rxq->rx_ring[desc];
	return !!(rxdp->status & E1000_RXD_STAT_DD);
}
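/*
 * Example: with rx_tail = 250, nb_rx_desc = 256 and offset = 10, the
 * function above checks descriptor (250 + 10) - 256 = 4 for the DD bit.
 */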
void
em_dev_clear_queues(struct rte_eth_dev *dev)
{
	uint16_t i;
	struct em_tx_queue *txq;
	struct em_rx_queue *rxq;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (txq != NULL) {
			em_tx_queue_release_mbufs(txq);
			em_reset_tx_queue(txq);
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (rxq != NULL) {
			em_rx_queue_release_mbufs(rxq);
			em_reset_rx_queue(rxq);
		}
	}
}
/*
 * Takes as input/output parameter RX buffer size.
 * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
 */
static uint32_t
em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
{
	/*
	 * For BSIZE & BSEX all configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	static const struct {
		uint32_t bufsz;
		uint32_t rctl;
	} bufsz_to_rctl[] = {
		{16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
		{8192,  (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX)},
		{4096,  (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX)},
		{2048,  E1000_RCTL_SZ_2048},
		{1024,  E1000_RCTL_SZ_1024},
		{512,   E1000_RCTL_SZ_512},
		{256,   E1000_RCTL_SZ_256},
	};

	int i;
	uint32_t rctl_bsize;

	rctl_bsize = *bufsz;

	/*
	 * Starting from 82571 it is possible to specify RX buffer size
	 * by RCTL.FLXBUF. When this field is different from zero, the
	 * RX buffer size = RCTL.FLXBUF * 1K
	 * (e.g. it is possible to specify RX buffer size 1,2,...,15KB).
	 * It works OK on real HW, but for some reason doesn't work
	 * on VMware's emulated 82574L.
	 * So for now, always use BSIZE/BSEX to setup RX buffer size.
	 * If you don't plan to use it on VMware emulated 82574L and
	 * would like to specify RX buffer size in 1K granularity,
	 * uncomment the following lines:
	 * ***************************************************************
	 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
	 *		rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
	 *	rctl_bsize /= EM_RCTL_FLXBUF_STEP;
	 *	*bufsz = rctl_bsize;
	 *	return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
	 *		E1000_RCTL_FLXBUF_MASK);
	 * }
	 * ***************************************************************
	 */

	for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
			i++) {
		if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
			*bufsz = bufsz_to_rctl[i].bufsz;
			return (bufsz_to_rctl[i].rctl);
		}
	}

	/* Should never happen. */
	return (-EINVAL);
}
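/*
 * Example: a request of *bufsz = 3000 falls through to the 2048 entry,
 * so *bufsz is rounded down to 2048 and E1000_RCTL_SZ_2048 is returned.
 */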
static int
em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
{
	struct em_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned i;
	static const struct e1000_rx_desc rxd_init = {
		.buffer_addr = 0,
	};

	/* Initialize software ring entries */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile struct e1000_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				"queue_id=%hu\n", rxq->queue_id);
			return (-ENOMEM);
		}

		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));

		/* Clear HW ring memory */
		rxq->rx_ring[i] = rxd_init;

		rxd = &rxq->rx_ring[i];
		rxd->buffer_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
int
eth_em_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct em_rx_queue *rxq;
	uint32_t rctl;
	uint32_t rfctl;
	uint32_t rxcsum;
	uint32_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	rfctl = E1000_READ_REG(hw, E1000_RFCTL);

	/* Disable extended descriptor type. */
	rfctl &= ~E1000_RFCTL_EXTEN;
	/* Disable accelerated acknowledge */
	if (hw->mac.type == e1000_82574)
		rfctl |= E1000_RFCTL_ACK_DIS;

	E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);

	/*
	 * XXX TEMPORARY WORKAROUND: on some systems with 82573
	 * long latencies are observed, like Lenovo X60. This
	 * change eliminates the problem, but since having positive
	 * values in RDTR is a known source of problems on other
	 * platforms another solution is being sought.
	 */
	if (hw->mac.type == e1000_82573)
		E1000_WRITE_REG(hw, E1000_RDTR, 0x20);

	dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;

	/* Determine RX bufsize. */
	rctl_bsize = EM_MAX_BUF_SIZE;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct rte_pktmbuf_pool_private *mbp_priv;
		uint32_t buf_size;

		rxq = dev->data->rx_queues[i];
		mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
		buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
		rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
	}

	rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);

	/* Configure and enable each RX queue. */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings and setup queue */
		ret = em_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		rxq->crc_len =
			(uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
							0 : ETHER_CRC_LEN);

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(*rxq->rx_ring));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);

		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
		rxdctl &= 0xFE000000;
		rxdctl |= rxq->pthresh & 0x3F;
		rxdctl |= (rxq->hthresh & 0x3F) << 8;
		rxdctl |= (rxq->wthresh & 0x3F) << 16;
		rxdctl |= E1000_RXDCTL_GRAN;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);

		/*
		 * Due to EM devices not having any sort of hardware
		 * limit for packet length, jumbo frames of any size
		 * can be accepted, thus we have to enable scattered
		 * rx if jumbo frames are enabled (or if buffer size
		 * is too small to accommodate non-jumbo packets)
		 * to avoid splitting packets that don't fit into
		 * one buffer.
		 */
		if (dev->data->dev_conf.rxmode.jumbo_frame ||
				rctl_bsize < ETHER_MAX_LEN) {
			dev->rx_pkt_burst =
				(eth_rx_burst_t)eth_em_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		dev->rx_pkt_burst = eth_em_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}
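	/*
	 * For example, with 2048-byte mbuf data buffers, a 9018-byte jumbo
	 * frame would be returned by eth_em_recv_scattered_pkts() as a chain
	 * of 5 mbuf segments.
	 */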
	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);

	if (dev->data->dev_conf.rxmode.hw_ip_checksum)
		rxcsum |= E1000_RXCSUM_IPOFL;
	else
		rxcsum &= ~E1000_RXCSUM_IPOFL;
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* No MRQ or RSS support for now */

	/* Set early receive threshold on appropriate hw */
	if ((hw->mac.type == e1000_ich9lan ||
			hw->mac.type == e1000_pch2lan ||
			hw->mac.type == e1000_ich10lan) &&
			dev->data->dev_conf.rxmode.jumbo_frame == 1) {
		u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
		E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
		E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
	}

	if (hw->mac.type == e1000_pch2lan) {
		if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
			e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
		else
			e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
	}

	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.hw_strip_crc)
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
	else
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;
	/* Legacy descriptor type. */
	rctl &= ~E1000_RCTL_DTYP_MASK;

	/*
	 * Configure support of jumbo frames, if any.
	 */
	if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_em_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct em_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(*txq->tx_ring));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		/*
		 * Bit 22 is reserved; on some models it should always be 0,
		 * on others always 1.
		 */
		txdctl &= E1000_TXDCTL_COUNT_DESC;
		txdctl |= txq->pthresh & 0x3F;
		txdctl |= (txq->hthresh & 0x3F) << 8;
		txdctl |= (txq->wthresh & 0x3F) << 16;
		txdctl |= E1000_TXDCTL_GRAN;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}