4 * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_interrupts.h>
47 #include <rte_byteorder.h>
48 #include <rte_common.h>
50 #include <rte_debug.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_lcore.h>
60 #include <rte_atomic.h>
61 #include <rte_branch_prediction.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
68 #include <rte_prefetch.h>
73 #include <rte_string_fns.h>
75 #include "e1000_logs.h"
76 #include "e1000/e1000_api.h"
77 #include "e1000_ethdev.h"
79 #define E1000_TXD_VLAN_SHIFT 16
81 #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
83 static inline struct rte_mbuf *
84 rte_rxmbuf_alloc(struct rte_mempool *mp)
88 m = __rte_mbuf_raw_alloc(mp);
89 __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
93 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
94 (uint64_t) ((mb)->buf_physaddr + \
95 (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
97 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
98 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
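/*
 * Illustrative sketch (not part of the original driver): the macros above
 * are how descriptors get their buffer addresses. A freshly allocated mbuf
 * that rearms an RX descriptor uses the headroom-based default address,
 * while a TX descriptor points at the mbuf's current data offset. The
 * helper names below are hypothetical and the snippet is kept out of the
 * build.
 */
#if 0
static inline void
example_rearm_rx_desc(volatile struct e1000_rx_desc *rxdp, struct rte_mbuf *m)
{
	/* Buffer starts right after the reserved headroom of the new mbuf. */
	rxdp->buffer_addr =
		rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(m));
	rxdp->status = 0;
}

static inline void
example_fill_tx_desc(volatile struct e1000_data_desc *txd, struct rte_mbuf *m)
{
	/* Buffer starts at the current packet data pointer. */
	txd->buffer_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(m));
}
#endif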
101 * Structure associated with each descriptor of the RX ring of a RX queue.
104 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
108 * Structure associated with each descriptor of the TX ring of a TX queue.
111 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
112 uint16_t next_id; /**< Index of next descriptor in ring. */
113 uint16_t last_id; /**< Index of last scattered descriptor. */
117 * Structure associated with each RX queue.
120 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
121 volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
122 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
123 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
124 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
125 struct em_rx_entry *sw_ring; /**< address of RX software ring. */
126 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
127 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
128 uint16_t nb_rx_desc; /**< number of RX descriptors. */
129 uint16_t rx_tail; /**< current value of RDT register. */
130 uint16_t nb_rx_hold; /**< number of held free RX desc. */
131 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
132 uint16_t queue_id; /**< RX queue index. */
133 uint8_t port_id; /**< Device port identifier. */
134 uint8_t pthresh; /**< Prefetch threshold register. */
135 uint8_t hthresh; /**< Host threshold register. */
136 uint8_t wthresh; /**< Write-back threshold register. */
137 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
141 * Hardware context number
144 EM_CTX_0 = 0, /**< CTX0 */
145 EM_CTX_NUM = 1, /**< CTX NUM */
149 * Structure to check if a new context needs to be built
152 uint16_t flags; /**< ol_flags related to context build. */
153 uint32_t cmp_mask; /**< compare mask */
154 union rte_vlan_macip hdrlen; /**< L2 and L3 header lengths */
158 * Structure associated with each TX queue.
161 volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
162 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
163 struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
164 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
165 uint16_t nb_tx_desc; /**< number of TX descriptors. */
166 uint16_t tx_tail; /**< Current value of TDT register. */
167 uint16_t tx_free_thresh; /**< minimum number of used descriptors before freeing. */
168 /** Number of TX descriptors to use before RS bit is set. */
169 uint16_t tx_rs_thresh;
170 /** Number of TX descriptors used since RS bit was set. */
172 /** Index to last TX descriptor to have been cleaned. */
173 uint16_t last_desc_cleaned;
174 /** Total number of TX descriptors ready to be allocated. */
176 uint16_t queue_id; /**< TX queue index. */
177 uint8_t port_id; /**< Device port identifier. */
178 uint8_t pthresh; /**< Prefetch threshold register. */
179 uint8_t hthresh; /**< Host threshold register. */
180 uint8_t wthresh; /**< Write-back threshold register. */
181 struct em_ctx_info ctx_cache;
182 /**< Hardware context history.*/
186 #define RTE_PMD_USE_PREFETCH
189 #ifdef RTE_PMD_USE_PREFETCH
190 #define rte_em_prefetch(p) rte_prefetch0(p)
192 #define rte_em_prefetch(p) do {} while(0)
195 #ifdef RTE_PMD_PACKET_PREFETCH
196 #define rte_packet_prefetch(p) rte_prefetch1(p)
198 #define rte_packet_prefetch(p) do {} while(0)
201 #ifndef DEFAULT_TX_FREE_THRESH
202 #define DEFAULT_TX_FREE_THRESH 32
203 #endif /* DEFAULT_TX_FREE_THRESH */
205 #ifndef DEFAULT_TX_RS_THRESH
206 #define DEFAULT_TX_RS_THRESH 32
207 #endif /* DEFAULT_TX_RS_THRESH */
210 /*********************************************************************
214 **********************************************************************/
217 * Populates TX context descriptor.
220 em_set_xmit_ctx(struct em_tx_queue* txq,
221 volatile struct e1000_context_desc *ctx_txd,
223 union rte_vlan_macip hdrlen)
225 uint32_t cmp_mask, cmd_len;
226 uint16_t ipcse, l2len;
227 struct e1000_context_desc ctx;
230 cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
232 l2len = hdrlen.f.l2_len;
233 ipcse = (uint16_t)(l2len + hdrlen.f.l3_len);
235 /* setup IPCS* fields */
236 ctx.lower_setup.ip_fields.ipcss = (uint8_t)l2len;
237 ctx.lower_setup.ip_fields.ipcso = (uint8_t)(l2len +
238 offsetof(struct ipv4_hdr, hdr_checksum));
241 * When doing checksum or TCP segmentation with IPv6 headers,
242 * IPCSE field should be set to 0.
244 if (flags & PKT_TX_IP_CKSUM) {
245 ctx.lower_setup.ip_fields.ipcse =
246 (uint16_t)rte_cpu_to_le_16(ipcse - 1);
247 cmd_len |= E1000_TXD_CMD_IP;
248 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
250 ctx.lower_setup.ip_fields.ipcse = 0;
253 /* setup TUCS* fields */
254 ctx.upper_setup.tcp_fields.tucss = (uint8_t)ipcse;
255 ctx.upper_setup.tcp_fields.tucse = 0;
257 switch (flags & PKT_TX_L4_MASK) {
258 case PKT_TX_UDP_CKSUM:
259 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
260 offsetof(struct udp_hdr, dgram_cksum));
261 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
263 case PKT_TX_TCP_CKSUM:
264 ctx.upper_setup.tcp_fields.tucso = (uint8_t)(ipcse +
265 offsetof(struct tcp_hdr, cksum));
266 cmd_len |= E1000_TXD_CMD_TCP;
267 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
270 ctx.upper_setup.tcp_fields.tucso = 0;
273 ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
274 ctx.tcp_seg_setup.data = 0;
278 txq->ctx_cache.flags = flags;
279 txq->ctx_cache.cmp_mask = cmp_mask;
280 txq->ctx_cache.hdrlen = hdrlen;
284 * Check which hardware context can be used. Use the existing match
285 * or create a new context descriptor.
287 static inline uint32_t
288 what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
289 union rte_vlan_macip hdrlen)
291 /* If it matches the current context */
292 if (likely (txq->ctx_cache.flags == flags &&
293 ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
294 txq->ctx_cache.cmp_mask) == 0))
301 /* Reset transmit descriptors after they have been used */
303 em_xmit_cleanup(struct em_tx_queue *txq)
305 struct em_tx_entry *sw_ring = txq->sw_ring;
306 volatile struct e1000_data_desc *txr = txq->tx_ring;
307 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
308 uint16_t nb_tx_desc = txq->nb_tx_desc;
309 uint16_t desc_to_clean_to;
310 uint16_t nb_tx_to_clean;
312 /* Determine the last descriptor needing to be cleaned */
313 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_rs_thresh);
314 if (desc_to_clean_to >= nb_tx_desc)
315 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
317 /* Check to make sure the last descriptor to clean is done */
318 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
319 if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
321 PMD_TX_FREE_LOG(DEBUG,
322 "TX descriptor %4u is not done"
323 "(port=%d queue=%d)",
325 txq->port_id, txq->queue_id);
326 /* Failed to clean any descriptors, better luck next time */
330 /* Figure out how many descriptors will be cleaned */
331 if (last_desc_cleaned > desc_to_clean_to)
332 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
335 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
338 PMD_TX_FREE_LOG(DEBUG,
339 "Cleaning %4u TX descriptors: %4u to %4u "
340 "(port=%d queue=%d)",
341 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
342 txq->port_id, txq->queue_id);
345 * The last descriptor to clean is done, so that means all the
346 * descriptors from the last descriptor that was cleaned
347 * up to the last descriptor with the RS bit set
348 * are done. Only reset the threshold descriptor.
350 txr[desc_to_clean_to].upper.fields.status = 0;
352 /* Update the txq to reflect the last descriptor that was cleaned */
353 txq->last_desc_cleaned = desc_to_clean_to;
354 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
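/*
 * Worked example for the cleanup arithmetic above (values are assumptions,
 * not taken from the original code): with nb_tx_desc = 256, tx_rs_thresh = 32
 * and last_desc_cleaned = 240, the candidate descriptor is 240 + 32 = 272,
 * which wraps to 272 - 256 = 16. If that descriptor reports DD, then
 * (256 - 240) + 16 = 32 descriptors are returned to nb_tx_free. The helper
 * below is a hypothetical restatement of that index computation.
 */
#if 0
static inline uint16_t
example_next_desc_to_clean(uint16_t last_cleaned, uint16_t rs_thresh,
		uint16_t ring_size)
{
	uint16_t next = (uint16_t)(last_cleaned + rs_thresh);

	if (next >= ring_size)
		next = (uint16_t)(next - ring_size);
	return next;
}
#endif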
360 static inline uint32_t
361 tx_desc_cksum_flags_to_upper(uint16_t ol_flags)
363 static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
364 static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
367 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
368 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
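/*
 * Illustrative example (not part of the original driver): a packet that
 * requests both IP and TCP checksum offload maps to both POPTS bits, placed
 * in the second byte of txd->upper.data by the << 8 shift used above.
 */
#if 0
static inline uint32_t
example_popts_for_ip_and_tcp(void)
{
	/* == (E1000_TXD_POPTS_IXSM | E1000_TXD_POPTS_TXSM) << 8 */
	return tx_desc_cksum_flags_to_upper(PKT_TX_IP_CKSUM |
					    PKT_TX_TCP_CKSUM);
}
#endif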
373 eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
376 struct em_tx_queue *txq;
377 struct em_tx_entry *sw_ring;
378 struct em_tx_entry *txe, *txn;
379 volatile struct e1000_data_desc *txr;
380 volatile struct e1000_data_desc *txd;
381 struct rte_mbuf *tx_pkt;
382 struct rte_mbuf *m_seg;
383 uint64_t buf_dma_addr;
385 uint32_t cmd_type_len;
395 union rte_vlan_macip hdrlen;
398 sw_ring = txq->sw_ring;
400 tx_id = txq->tx_tail;
401 txe = &sw_ring[tx_id];
403 /* Determine if the descriptor ring needs to be cleaned. */
404 if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
405 em_xmit_cleanup(txq);
409 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
413 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
416 * Determine how many (if any) context descriptors
417 * are needed for offload functionality.
419 ol_flags = tx_pkt->ol_flags;
421 /* If hardware offload required */
422 tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
425 hdrlen = tx_pkt->pkt.vlan_macip;
426 /* If a new context is to be built or the existing ctx is reused. */
427 ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
429 /* Only allocate a context descriptor if required */
430 new_ctx = (ctx == EM_CTX_NUM);
434 * Keep track of how many descriptors are used in this loop.
435 * This will always be the number of segments + the number of
436 * context descriptors required to transmit the packet.
438 nb_used = (uint16_t)(tx_pkt->pkt.nb_segs + new_ctx);
441 * The number of descriptors that must be allocated for a
442 * packet is the number of segments of that packet, plus 1
443 * Context Descriptor for the hardware offload, if any.
444 * Determine the last TX descriptor to allocate in the TX ring
445 * for the packet, starting from the current position (tx_id)
448 tx_last = (uint16_t) (tx_id + nb_used - 1);
451 if (tx_last >= txq->nb_tx_desc)
452 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
454 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
455 " tx_first=%u tx_last=%u\n",
456 (unsigned) txq->port_id,
457 (unsigned) txq->queue_id,
458 (unsigned) tx_pkt->pkt.pkt_len,
463 * Make sure there are enough TX descriptors available to
464 * transmit the entire packet.
465 * nb_used better be less than or equal to txq->tx_rs_thresh
467 while (unlikely (nb_used > txq->nb_tx_free)) {
468 PMD_TX_FREE_LOG(DEBUG,
469 "Not enough free TX descriptors "
470 "nb_used=%4u nb_free=%4u "
471 "(port=%d queue=%d)",
472 nb_used, txq->nb_tx_free,
473 txq->port_id, txq->queue_id);
475 if (em_xmit_cleanup(txq) != 0) {
476 /* Could not clean any descriptors */
484 * By now there are enough free TX descriptors to transmit
489 * Set common flags of all TX Data Descriptors.
491 * The following bits must be set in all Data Descriptors:
492 * - E1000_TXD_DTYP_DATA
493 * - E1000_TXD_DTYP_DEXT
495 * The following bits must be set in the first Data Descriptor
496 * and are ignored in the other ones:
497 * - E1000_TXD_POPTS_IXSM
498 * - E1000_TXD_POPTS_TXSM
500 * The following bits must be set in the last Data Descriptor
501 * and are ignored in the other ones:
502 * - E1000_TXD_CMD_VLE
503 * - E1000_TXD_CMD_IFCS
505 * The following bits must only be set in the last Data
507 * - E1000_TXD_CMD_EOP
509 * The following bits can be set in any Data Descriptor, but
510 * are only set in the last Data Descriptor:
513 cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
517 /* Set VLAN Tag offload fields. */
518 if (ol_flags & PKT_TX_VLAN_PKT) {
519 cmd_type_len |= E1000_TXD_CMD_VLE;
520 popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
521 E1000_TXD_VLAN_SHIFT;
526 * Setup the TX Context Descriptor if required
529 volatile struct e1000_context_desc *ctx_txd;
531 ctx_txd = (volatile struct e1000_context_desc *)
534 txn = &sw_ring[txe->next_id];
535 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
537 if (txe->mbuf != NULL) {
538 rte_pktmbuf_free_seg(txe->mbuf);
542 em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
545 txe->last_id = tx_last;
546 tx_id = txe->next_id;
551 * Set up the TX Data Descriptor.
552 * This path is taken whether a new context descriptor
553 * was built or an existing one is reused.
555 popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
561 txn = &sw_ring[txe->next_id];
563 if (txe->mbuf != NULL)
564 rte_pktmbuf_free_seg(txe->mbuf);
568 * Set up Transmit Data Descriptor.
570 slen = m_seg->pkt.data_len;
571 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
573 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
574 txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
575 txd->upper.data = rte_cpu_to_le_32(popts_spec);
577 txe->last_id = tx_last;
578 tx_id = txe->next_id;
580 m_seg = m_seg->pkt.next;
581 } while (m_seg != NULL);
584 * The last packet data descriptor needs End Of Packet (EOP)
586 cmd_type_len |= E1000_TXD_CMD_EOP;
587 txq->nb_tx_used = (uint16_t)(txq->nb_tx_used + nb_used);
588 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
590 /* Set RS bit only on threshold packets' last descriptor */
591 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
592 PMD_TX_FREE_LOG(DEBUG,
593 "Setting RS bit on TXD id="
594 "%4u (port=%d queue=%d)",
595 tx_last, txq->port_id, txq->queue_id);
597 cmd_type_len |= E1000_TXD_CMD_RS;
599 /* Update txq RS bit counters */
602 txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
608 * Set the Transmit Descriptor Tail (TDT)
610 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
611 (unsigned) txq->port_id, (unsigned) txq->queue_id,
612 (unsigned) tx_id, (unsigned) nb_tx);
613 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
614 txq->tx_tail = tx_id;
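/*
 * Illustrative usage sketch (not part of the driver): applications never
 * call eth_em_xmit_pkts() directly; it is installed as the device's
 * tx_pkt_burst handler and reached through rte_eth_tx_burst(). The helper
 * name and the drop-on-full policy below are assumptions for illustration.
 */
#if 0
static void
example_tx_path(uint8_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);

	/* Packets that did not fit into the TX ring are freed here. */
	while (nb_sent < nb_pkts)
		rte_pktmbuf_free(pkts[nb_sent++]);
}
#endif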
619 /*********************************************************************
623 **********************************************************************/
625 static inline uint16_t
626 rx_desc_status_to_pkt_flags(uint32_t rx_status)
630 /* Check if VLAN present */
631 pkt_flags = (uint16_t)((rx_status & E1000_RXD_STAT_VP) ?
632 PKT_RX_VLAN_PKT : 0);
637 static inline uint16_t
638 rx_desc_error_to_pkt_flags(uint32_t rx_error)
640 uint16_t pkt_flags = 0;
642 if (rx_error & E1000_RXD_ERR_IPE)
643 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
644 if (rx_error & E1000_RXD_ERR_TCPE)
645 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
650 eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
653 volatile struct e1000_rx_desc *rx_ring;
654 volatile struct e1000_rx_desc *rxdp;
655 struct em_rx_queue *rxq;
656 struct em_rx_entry *sw_ring;
657 struct em_rx_entry *rxe;
658 struct rte_mbuf *rxm;
659 struct rte_mbuf *nmb;
660 struct e1000_rx_desc rxd;
672 rx_id = rxq->rx_tail;
673 rx_ring = rxq->rx_ring;
674 sw_ring = rxq->sw_ring;
675 while (nb_rx < nb_pkts) {
677 * The order of operations here is important as the DD status
678 * bit must not be read after any other descriptor fields.
679 * rx_ring and rxdp are pointing to volatile data so the order
680 * of accesses cannot be reordered by the compiler. If they were
681 * not volatile, they could be reordered which could lead to
682 * using invalid descriptor fields when read from rxd.
684 rxdp = &rx_ring[rx_id];
685 status = rxdp->status;
686 if (! (status & E1000_RXD_STAT_DD))
693 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
694 * likely to be invalid and to be dropped by the various
695 * validation checks performed by the network stack.
697 * Allocate a new mbuf to replenish the RX ring descriptor.
698 * If the allocation fails:
699 * - arrange for that RX descriptor to be the first one
700 * being parsed the next time the receive function is
701 * invoked [on the same queue].
703 * - Stop parsing the RX ring and return immediately.
705 * This policy does not drop the packet received in the RX
706 * descriptor for which the allocation of a new mbuf failed.
707 * Thus, it allows that packet to be later retrieved if
708 * mbufs have been freed in the meantime.
709 * As a side effect, holding RX descriptors instead of
710 * systematically giving them back to the NIC may lead to
711 * RX ring exhaustion situations.
712 * However, the NIC can gracefully prevent such situations
713 * from happening by sending specific "back-pressure" flow control
714 * frames to its peer(s).
716 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
717 "status=0x%x pkt_len=%u\n",
718 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
719 (unsigned) rx_id, (unsigned) status,
720 (unsigned) rte_le_to_cpu_16(rxd.length));
722 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
724 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
726 (unsigned) rxq->port_id,
727 (unsigned) rxq->queue_id);
728 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
733 rxe = &sw_ring[rx_id];
735 if (rx_id == rxq->nb_rx_desc)
738 /* Prefetch next mbuf while processing current one. */
739 rte_em_prefetch(sw_ring[rx_id].mbuf);
742 * When the next RX descriptor is on a cache-line boundary,
743 * prefetch the next 4 RX descriptors and the next 8 pointers
746 if ((rx_id & 0x3) == 0) {
747 rte_em_prefetch(&rx_ring[rx_id]);
748 rte_em_prefetch(&sw_ring[rx_id]);
751 /* Rearm RXD: attach new mbuf and reset status to zero. */
756 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
757 rxdp->buffer_addr = dma_addr;
761 * Initialize the returned mbuf.
762 * 1) setup generic mbuf fields:
763 * - number of segments,
766 * - RX port identifier.
767 * 2) integrate hardware offload data, if any:
769 * - IP checksum flag,
770 * - VLAN TCI, if any,
773 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
775 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
776 rte_packet_prefetch(rxm->pkt.data);
777 rxm->pkt.nb_segs = 1;
778 rxm->pkt.next = NULL;
779 rxm->pkt.pkt_len = pkt_len;
780 rxm->pkt.data_len = pkt_len;
781 rxm->pkt.in_port = rxq->port_id;
783 rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
784 rxm->ol_flags = (uint16_t)(rxm->ol_flags |
785 rx_desc_error_to_pkt_flags(rxd.errors));
787 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
788 rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
791 * Store the mbuf address into the next entry of the array
792 * of returned packets.
794 rx_pkts[nb_rx++] = rxm;
796 rxq->rx_tail = rx_id;
799 * If the number of free RX descriptors is greater than the RX free
800 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
802 * Update the RDT with the value of the last processed RX descriptor
803 * minus 1, to guarantee that the RDT register is never equal to the
804 * RDH register, which creates a "full" ring situation from the
805 * hardware point of view...
807 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
808 if (nb_hold > rxq->rx_free_thresh) {
809 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
810 "nb_hold=%u nb_rx=%u\n",
811 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
812 (unsigned) rx_id, (unsigned) nb_hold,
814 rx_id = (uint16_t) ((rx_id == 0) ?
815 (rxq->nb_rx_desc - 1) : (rx_id - 1));
816 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
819 rxq->nb_rx_hold = nb_hold;
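/*
 * Illustrative usage sketch (not part of the driver): this receive routine
 * is installed as the device's rx_pkt_burst handler and reached through
 * rte_eth_rx_burst(). The helper name below is hypothetical.
 */
#if 0
static void
example_rx_path(uint8_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb_rcvd;

	nb_rcvd = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb_rcvd; i++) {
		/* ... process pkts[i] ... */
		rte_pktmbuf_free(pkts[i]);
	}
}
#endif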
824 eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
827 struct em_rx_queue *rxq;
828 volatile struct e1000_rx_desc *rx_ring;
829 volatile struct e1000_rx_desc *rxdp;
830 struct em_rx_entry *sw_ring;
831 struct em_rx_entry *rxe;
832 struct rte_mbuf *first_seg;
833 struct rte_mbuf *last_seg;
834 struct rte_mbuf *rxm;
835 struct rte_mbuf *nmb;
836 struct e1000_rx_desc rxd;
837 uint64_t dma; /* Physical address of mbuf data buffer */
848 rx_id = rxq->rx_tail;
849 rx_ring = rxq->rx_ring;
850 sw_ring = rxq->sw_ring;
853 * Retrieve RX context of current packet, if any.
855 first_seg = rxq->pkt_first_seg;
856 last_seg = rxq->pkt_last_seg;
858 while (nb_rx < nb_pkts) {
861 * The order of operations here is important as the DD status
862 * bit must not be read after any other descriptor fields.
863 * rx_ring and rxdp are pointing to volatile data so the order
864 * of accesses cannot be reordered by the compiler. If they were
865 * not volatile, they could be reordered which could lead to
866 * using invalid descriptor fields when read from rxd.
868 rxdp = &rx_ring[rx_id];
869 status = rxdp->status;
870 if (! (status & E1000_RXD_STAT_DD))
877 * Allocate a new mbuf to replenish the RX ring descriptor.
878 * If the allocation fails:
879 * - arrange for that RX descriptor to be the first one
880 * being parsed the next time the receive function is
881 * invoked [on the same queue].
883 * - Stop parsing the RX ring and return immediately.
885 * This policy does not drop the packet received in the RX
886 * descriptor for which the allocation of a new mbuf failed.
887 * Thus, it allows that packet to be later retrieved if
888 * mbufs have been freed in the meantime.
889 * As a side effect, holding RX descriptors instead of
890 * systematically giving them back to the NIC may lead to
891 * RX ring exhaustion situations.
892 * However, the NIC can gracefully prevent such situations
893 * from happening by sending specific "back-pressure" flow control
894 * frames to its peer(s).
896 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
897 "status=0x%x data_len=%u\n",
898 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
899 (unsigned) rx_id, (unsigned) status,
900 (unsigned) rte_le_to_cpu_16(rxd.length));
902 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
904 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
905 "queue_id=%u\n", (unsigned) rxq->port_id,
906 (unsigned) rxq->queue_id);
907 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
912 rxe = &sw_ring[rx_id];
914 if (rx_id == rxq->nb_rx_desc)
917 /* Prefetch next mbuf while processing current one. */
918 rte_em_prefetch(sw_ring[rx_id].mbuf);
921 * When the next RX descriptor is on a cache-line boundary,
922 * prefetch the next 4 RX descriptors and the next 8 pointers
925 if ((rx_id & 0x3) == 0) {
926 rte_em_prefetch(&rx_ring[rx_id]);
927 rte_em_prefetch(&sw_ring[rx_id]);
931 * Update the RX descriptor with the physical address of the
932 * data buffer of the newly allocated mbuf.
936 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
937 rxdp->buffer_addr = dma;
941 * Set data length & data buffer address of mbuf.
943 data_len = rte_le_to_cpu_16(rxd.length);
944 rxm->pkt.data_len = data_len;
945 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
948 * If this is the first buffer of the received packet,
949 * set the pointer to the first mbuf of the packet and
950 * initialize its context.
951 * Otherwise, update the total length and the number of segments
952 * of the current scattered packet, and update the pointer to
953 * the last mbuf of the current packet.
955 if (first_seg == NULL) {
957 first_seg->pkt.pkt_len = data_len;
958 first_seg->pkt.nb_segs = 1;
960 first_seg->pkt.pkt_len += data_len;
961 first_seg->pkt.nb_segs++;
962 last_seg->pkt.next = rxm;
966 * If this is not the last buffer of the received packet,
967 * update the pointer to the last mbuf of the current scattered
968 * packet and continue to parse the RX ring.
970 if (! (status & E1000_RXD_STAT_EOP)) {
976 * This is the last buffer of the received packet.
977 * If the CRC is not stripped by the hardware:
978 * - Subtract the CRC length from the total packet length.
979 * - If the last buffer only contains the whole CRC or a part
980 * of it, free the mbuf associated to the last buffer.
981 * If part of the CRC is also contained in the previous
982 * mbuf, subtract the length of that CRC part from the
983 * data length of the previous mbuf.
985 rxm->pkt.next = NULL;
986 if (unlikely(rxq->crc_len > 0)) {
987 first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
988 if (data_len <= ETHER_CRC_LEN) {
989 rte_pktmbuf_free_seg(rxm);
990 first_seg->pkt.nb_segs--;
991 last_seg->pkt.data_len = (uint16_t)
992 (last_seg->pkt.data_len -
993 (ETHER_CRC_LEN - data_len));
994 last_seg->pkt.next = NULL;
997 (uint16_t) (data_len - ETHER_CRC_LEN);
1001 * Initialize the first mbuf of the returned packet:
1002 * - RX port identifier,
1003 * - hardware offload data, if any:
1004 * - IP checksum flag,
1007 first_seg->pkt.in_port = rxq->port_id;
1009 first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
1010 first_seg->ol_flags = (uint16_t)(first_seg->ol_flags |
1011 rx_desc_error_to_pkt_flags(rxd.errors));
1013 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1014 rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
1016 /* Prefetch data of first segment, if configured to do so. */
1017 rte_packet_prefetch(first_seg->pkt.data);
1020 * Store the mbuf address into the next entry of the array
1021 * of returned packets.
1023 rx_pkts[nb_rx++] = first_seg;
1026 * Setup receipt context for a new packet.
1032 * Record index of the next RX descriptor to probe.
1034 rxq->rx_tail = rx_id;
1037 * Save receive context.
1039 rxq->pkt_first_seg = first_seg;
1040 rxq->pkt_last_seg = last_seg;
1043 * If the number of free RX descriptors is greater than the RX free
1044 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1046 * Update the RDT with the value of the last processed RX descriptor
1047 * minus 1, to guarantee that the RDT register is never equal to the
1048 * RDH register, which creates a "full" ring situation from the
1049 * hardware point of view...
1051 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1052 if (nb_hold > rxq->rx_free_thresh) {
1053 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1054 "nb_hold=%u nb_rx=%u\n",
1055 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1056 (unsigned) rx_id, (unsigned) nb_hold,
1058 rx_id = (uint16_t) ((rx_id == 0) ?
1059 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1060 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1063 rxq->nb_rx_hold = nb_hold;
1068 * Rings setup and release.
1070 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN should be
1071 * a multiple of 128 bytes. So we align TDBA/RDBA on a 128-byte boundary.
1072 * This also optimizes the cache line size effect.
1073 * H/W supports cache line sizes up to 128.
1075 #define EM_ALIGN 128
1078 * Maximum number of Ring Descriptors.
1080 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1081 * descriptors should meet the following condition:
1082 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1084 #define EM_MIN_RING_DESC 32
1085 #define EM_MAX_RING_DESC 4096
1087 #define EM_MAX_BUF_SIZE 16384
1088 #define EM_RCTL_FLXBUF_STEP 1024
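/*
 * Illustrative helper (not in the original source): both queue setup
 * functions below apply the same ring-size rule. With 16-byte legacy
 * descriptors, the 128-byte alignment constraint means the descriptor count
 * must be a multiple of 8 within [EM_MIN_RING_DESC, EM_MAX_RING_DESC].
 */
#if 0
static int
example_ring_size_ok(uint16_t nb_desc, size_t desc_size)
{
	return ((nb_desc * desc_size) % EM_ALIGN) == 0 &&
		nb_desc >= EM_MIN_RING_DESC &&
		nb_desc <= EM_MAX_RING_DESC;
}
#endif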
1090 static const struct rte_memzone *
1091 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1092 uint16_t queue_id, uint32_t ring_size, int socket_id)
1094 const struct rte_memzone *mz;
1095 char z_name[RTE_MEMZONE_NAMESIZE];
1097 rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1098 dev->driver->pci_drv.name, ring_name, dev->data->port_id,
1101 if ((mz = rte_memzone_lookup(z_name)) != 0)
1104 return rte_memzone_reserve(z_name, ring_size, socket_id, 0);
1108 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
1112 if (txq->sw_ring != NULL) {
1113 for (i = 0; i != txq->nb_tx_desc; i++) {
1114 if (txq->sw_ring[i].mbuf != NULL) {
1115 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1116 txq->sw_ring[i].mbuf = NULL;
1123 em_tx_queue_release(struct em_tx_queue *txq)
1126 em_tx_queue_release_mbufs(txq);
1127 rte_free(txq->sw_ring);
1133 eth_em_tx_queue_release(void *txq)
1135 em_tx_queue_release(txq);
1138 /* (Re)set dynamic em_tx_queue fields to defaults */
1140 em_reset_tx_queue(struct em_tx_queue *txq)
1142 uint16_t i, nb_desc, prev;
1143 static const struct e1000_data_desc txd_init = {
1144 .upper.fields = {.status = E1000_TXD_STAT_DD},
1147 nb_desc = txq->nb_tx_desc;
1149 /* Initialize ring entries */
1151 prev = (uint16_t) (nb_desc - 1);
1153 for (i = 0; i < nb_desc; i++) {
1154 txq->tx_ring[i] = txd_init;
1155 txq->sw_ring[i].mbuf = NULL;
1156 txq->sw_ring[i].last_id = i;
1157 txq->sw_ring[prev].next_id = i;
1162 * Always allow 1 descriptor to be un-allocated to avoid
1163 * a H/W race condition
1165 txq->nb_tx_free = (uint16_t)(nb_desc - 1);
1166 txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
1167 txq->nb_tx_used = 0;
1170 memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
1174 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
1177 unsigned int socket_id,
1178 const struct rte_eth_txconf *tx_conf)
1180 const struct rte_memzone *tz;
1181 struct em_tx_queue *txq;
1182 struct e1000_hw *hw;
1184 uint16_t tx_rs_thresh, tx_free_thresh;
1186 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1189 * Validate number of transmit descriptors.
1190 * It must not exceed hardware maximum, and must be multiple
1193 if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
1194 (nb_desc > EM_MAX_RING_DESC) ||
1195 (nb_desc < EM_MIN_RING_DESC)) {
1199 tx_free_thresh = tx_conf->tx_free_thresh;
1200 if (tx_free_thresh == 0)
1201 tx_free_thresh = (uint16_t)RTE_MIN(nb_desc / 4,
1202 DEFAULT_TX_FREE_THRESH);
1204 tx_rs_thresh = tx_conf->tx_rs_thresh;
1205 if (tx_rs_thresh == 0)
1206 tx_rs_thresh = (uint16_t)RTE_MIN(tx_free_thresh,
1207 DEFAULT_TX_RS_THRESH);
1209 if (tx_free_thresh >= (nb_desc - 3)) {
1210 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
1211 "number of TX descriptors minus 3. (tx_free_thresh=%u "
1212 "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
1213 (int)dev->data->port_id, (int)queue_idx);
1216 if (tx_rs_thresh > tx_free_thresh) {
1217 RTE_LOG(ERR, PMD, "tx_rs_thresh must be less than or equal to "
1218 "tx_free_thresh. (tx_free_thresh=%u tx_rs_thresh=%u "
1219 "port=%d queue=%d)\n", (unsigned int)tx_free_thresh,
1220 (unsigned int)tx_rs_thresh, (int)dev->data->port_id,
1226 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
1227 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
1228 * by the NIC and all descriptors are written back after the NIC
1229 * accumulates WTHRESH descriptors.
1231 if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
1232 RTE_LOG(ERR, PMD, "TX WTHRESH must be set to 0 if "
1233 "tx_rs_thresh is greater than 1. (tx_rs_thresh=%u "
1234 "port=%d queue=%d)\n", (unsigned int)tx_rs_thresh,
1235 (int)dev->data->port_id, (int)queue_idx);
1239 /* Free memory prior to re-allocation if needed... */
1240 if (dev->data->tx_queues[queue_idx] != NULL) {
1241 em_tx_queue_release(dev->data->tx_queues[queue_idx]);
1242 dev->data->tx_queues[queue_idx] = NULL;
1246 * Allocate TX ring hardware descriptors. A memzone large enough to
1247 * handle the maximum ring size is allocated in order to allow for
1248 * resizing in later calls to the queue setup function.
1250 tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
1251 if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
1252 socket_id)) == NULL)
1255 /* Allocate the tx queue data structure. */
1256 if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
1257 CACHE_LINE_SIZE)) == NULL)
1260 /* Allocate software ring */
1261 if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
1262 sizeof(txq->sw_ring[0]) * nb_desc,
1263 CACHE_LINE_SIZE)) == NULL) {
1264 em_tx_queue_release(txq);
1268 txq->nb_tx_desc = nb_desc;
1269 txq->tx_free_thresh = tx_free_thresh;
1270 txq->tx_rs_thresh = tx_rs_thresh;
1271 txq->pthresh = tx_conf->tx_thresh.pthresh;
1272 txq->hthresh = tx_conf->tx_thresh.hthresh;
1273 txq->wthresh = tx_conf->tx_thresh.wthresh;
1274 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1276 txq->queue_id = queue_idx;
1277 txq->port_id = dev->data->port_id;
1279 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1280 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1281 txq->tx_ring = (struct e1000_data_desc *) tz->addr;
1283 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1284 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1286 em_reset_tx_queue(txq);
1288 dev->data->tx_queues[queue_idx] = txq;
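/*
 * Illustrative usage sketch (values are examples only): an application
 * reaches the setup function above through rte_eth_tx_queue_setup(). Leaving
 * tx_free_thresh/tx_rs_thresh at 0 lets the driver apply its own
 * DEFAULT_TX_FREE_THRESH/DEFAULT_TX_RS_THRESH defaults, and WTHRESH must
 * then be 0 to satisfy the check above.
 */
#if 0
static int
example_setup_tx_queue(uint8_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 0 },
		.tx_free_thresh = 0,	/* 0: driver picks a default */
		.tx_rs_thresh = 0,	/* 0: driver picks a default */
	};

	/* 512 descriptors: a multiple of 8 within [32, 4096]. */
	return rte_eth_tx_queue_setup(port_id, queue_id, 512,
			rte_socket_id(), &txconf);
}
#endif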
1293 em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
1297 if (rxq->sw_ring != NULL) {
1298 for (i = 0; i != rxq->nb_rx_desc; i++) {
1299 if (rxq->sw_ring[i].mbuf != NULL) {
1300 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1301 rxq->sw_ring[i].mbuf = NULL;
1308 em_rx_queue_release(struct em_rx_queue *rxq)
1311 em_rx_queue_release_mbufs(rxq);
1312 rte_free(rxq->sw_ring);
1318 eth_em_rx_queue_release(void *rxq)
1320 em_rx_queue_release(rxq);
1323 /* Reset dynamic em_rx_queue fields back to defaults */
1325 em_reset_rx_queue(struct em_rx_queue *rxq)
1328 rxq->nb_rx_hold = 0;
1329 rxq->pkt_first_seg = NULL;
1330 rxq->pkt_last_seg = NULL;
1334 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
1337 unsigned int socket_id,
1338 const struct rte_eth_rxconf *rx_conf,
1339 struct rte_mempool *mp)
1341 const struct rte_memzone *rz;
1342 struct em_rx_queue *rxq;
1343 struct e1000_hw *hw;
1346 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1349 * Validate number of receive descriptors.
1350 * It must not exceed hardware maximum, and must be multiple
1353 if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
1354 (nb_desc > EM_MAX_RING_DESC) ||
1355 (nb_desc < EM_MIN_RING_DESC)) {
1360 * EM devices don't support drop_en functionality
1362 if (rx_conf->rx_drop_en) {
1363 RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");
1367 /* Free memory prior to re-allocation if needed. */
1368 if (dev->data->rx_queues[queue_idx] != NULL) {
1369 em_rx_queue_release(dev->data->rx_queues[queue_idx]);
1370 dev->data->rx_queues[queue_idx] = NULL;
1373 /* Allocate RX ring for the max possible number of hardware descriptors. */
1374 rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
1375 if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
1376 socket_id)) == NULL)
1379 /* Allocate the RX queue data structure. */
1380 if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
1381 CACHE_LINE_SIZE)) == NULL)
1384 /* Allocate software ring. */
1385 if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1386 sizeof (rxq->sw_ring[0]) * nb_desc,
1387 CACHE_LINE_SIZE)) == NULL) {
1388 em_rx_queue_release(rxq);
1393 rxq->nb_rx_desc = nb_desc;
1394 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1395 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1396 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1397 if (rxq->wthresh > 0 && hw->mac.type == e1000_82576)
1400 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1401 rxq->queue_id = queue_idx;
1402 rxq->port_id = dev->data->port_id;
1403 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
1406 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1407 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(queue_idx));
1408 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1409 rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
1411 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1412 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1414 dev->data->rx_queues[queue_idx] = rxq;
1415 em_reset_rx_queue(rxq);
1421 eth_em_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1423 struct em_rx_queue *rxq;
1424 uint32_t nb_pkts_available;
1428 if (rx_queue_id >= dev->data->nb_rx_queues) {
1429 PMD_RX_LOG(DEBUG,"Invalid RX queue_id=%d\n", rx_queue_id);
1433 rxq = dev->data->rx_queues[rx_queue_id];
1434 rx_id = (uint16_t) ((rxq->rx_tail == 0) ? (rxq->nb_rx_desc - 1) :
1435 (rxq->rx_tail - 1));
1436 rx_rdh = E1000_PCI_REG(rxq->rdh_reg_addr);
1438 nb_pkts_available = rx_rdh - rx_id;
1440 nb_pkts_available = rx_rdh - rx_id + rxq->nb_rx_desc;
1442 return (nb_pkts_available);
1446 em_dev_clear_queues(struct rte_eth_dev *dev)
1449 struct em_tx_queue *txq;
1450 struct em_rx_queue *rxq;
1452 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1453 txq = dev->data->tx_queues[i];
1455 em_tx_queue_release_mbufs(txq);
1456 em_reset_tx_queue(txq);
1460 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1461 rxq = dev->data->rx_queues[i];
1463 em_rx_queue_release_mbufs(rxq);
1464 em_reset_rx_queue(rxq);
1470 * Takes the RX buffer size as an input/output parameter.
1471 * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
1474 em_rctl_bsize(__rte_unused enum e1000_mac_type hwtyp, uint32_t *bufsz)
1477 * For BSIZE & BSEX all configurable sizes are:
1478 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1479 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1480 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1481 * 2048: rctl |= E1000_RCTL_SZ_2048;
1482 * 1024: rctl |= E1000_RCTL_SZ_1024;
1483 * 512: rctl |= E1000_RCTL_SZ_512;
1484 * 256: rctl |= E1000_RCTL_SZ_256;
1486 static const struct {
1489 } bufsz_to_rctl[] = {
1490 {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
1491 {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
1492 {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
1493 {2048, E1000_RCTL_SZ_2048},
1494 {1024, E1000_RCTL_SZ_1024},
1495 {512, E1000_RCTL_SZ_512},
1496 {256, E1000_RCTL_SZ_256},
1500 uint32_t rctl_bsize;
1502 rctl_bsize = *bufsz;
1505 * Starting from 82571 it is possible to specify RX buffer size
1506 * by RCTL.FLXBUF. When this field is different from zero, the
1507 * RX buffer size = RCTL.FLXBUF * 1K
1508 * (e.g. it is possible to specify an RX buffer size of 1,2,...,15KB).
1509 * It works ok on real HW, but for some reason doesn't work
1510 * on VMware emulated 82574L.
1511 * So for now, always use BSIZE/BSEX to setup RX buffer size.
1512 * If you don't plan to use it on VMware emulated 82574L and
1513 * would like to specify RX buffer size in 1K granularity,
1514 * uncomment the following lines:
1515 * ***************************************************************
1516 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
1517 * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
1518 * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
1519 * *bufsz = rctl_bsize;
1520 * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
1521 * E1000_RCTL_FLXBUF_MASK);
1523 * ***************************************************************
1526 for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
1528 if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
1529 *bufsz = bufsz_to_rctl[i].bufsz;
1530 return (bufsz_to_rctl[i].rctl);
1534 /* Should never happen. */
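/*
 * Worked example (assumed mempool geometry, not from the original code):
 * a 2048-byte mbuf data room minus the 128-byte RTE_PKTMBUF_HEADROOM leaves
 * 1920 usable bytes; the table above rounds this down to 1024, so *bufsz
 * becomes 1024 and E1000_RCTL_SZ_1024 is returned.
 */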
1539 em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
1541 struct em_rx_entry *rxe = rxq->sw_ring;
1544 static const struct e1000_rx_desc rxd_init = {
1548 /* Initialize software ring entries */
1549 for (i = 0; i < rxq->nb_rx_desc; i++) {
1550 volatile struct e1000_rx_desc *rxd;
1551 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1554 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1555 "queue_id=%hu\n", rxq->queue_id);
1556 em_rx_queue_release(rxq);
1560 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1562 /* Clear HW ring memory */
1563 rxq->rx_ring[i] = rxd_init;
1565 rxd = &rxq->rx_ring[i];
1566 rxd->buffer_addr = dma_addr;
1573 /*********************************************************************
1575 * Enable receive unit.
1577 **********************************************************************/
1579 eth_em_rx_init(struct rte_eth_dev *dev)
1581 struct e1000_hw *hw;
1582 struct em_rx_queue *rxq;
1586 uint32_t rctl_bsize;
1590 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1593 * Make sure receives are disabled while setting
1594 * up the descriptor ring.
1596 rctl = E1000_READ_REG(hw, E1000_RCTL);
1597 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1599 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1601 /* Disable extended descriptor type. */
1602 rfctl &= ~E1000_RFCTL_EXTEN;
1603 /* Disable accelerated acknowledge */
1604 if (hw->mac.type == e1000_82574)
1605 rfctl |= E1000_RFCTL_ACK_DIS;
1607 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1610 * XXX TEMPORARY WORKAROUND: on some systems with 82573
1611 * long latencies are observed, like Lenovo X60. This
1612 * change eliminates the problem, but since having positive
1613 * values in RDTR is a known source of problems on other
1614 * platforms, another solution is being sought.
1616 if (hw->mac.type == e1000_82573)
1617 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
1619 dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
1621 /* Determine RX bufsize. */
1622 rctl_bsize = EM_MAX_BUF_SIZE;
1623 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1624 struct rte_pktmbuf_pool_private *mbp_priv;
1627 rxq = dev->data->rx_queues[i];
1628 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1629 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1630 rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
1633 rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
1635 /* Configure and enable each RX queue. */
1636 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1640 rxq = dev->data->rx_queues[i];
1642 /* Allocate buffers for descriptor rings and setup queue */
1643 ret = em_alloc_rx_queue_mbufs(rxq);
1648 * Reset crc_len in case it was changed after queue setup by a
1652 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1655 bus_addr = rxq->rx_ring_phys_addr;
1656 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1658 sizeof(*rxq->rx_ring));
1659 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1660 (uint32_t)(bus_addr >> 32));
1661 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1663 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1664 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1666 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1667 rxdctl &= 0xFE000000;
1668 rxdctl |= rxq->pthresh & 0x3F;
1669 rxdctl |= (rxq->hthresh & 0x3F) << 8;
1670 rxdctl |= (rxq->wthresh & 0x3F) << 16;
1671 rxdctl |= E1000_RXDCTL_GRAN;
1672 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1675 * Since EM devices have no hardware limit on packet
1676 * length, jumbo frames of any size can be accepted.
1677 * We therefore have to enable scattered RX if jumbo
1678 * frames are enabled (or if the buffer size is too
1679 * small to accommodate non-jumbo packets) to avoid
1680 * splitting packets that don't fit into
1683 if (dev->data->dev_conf.rxmode.jumbo_frame ||
1684 rctl_bsize < ETHER_MAX_LEN) {
1686 (eth_rx_burst_t)eth_em_recv_scattered_pkts;
1687 dev->data->scattered_rx = 1;
1692 * Setup the Checksum Register.
1693 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1695 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1697 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1698 rxcsum |= E1000_RXCSUM_IPOFL;
1700 rxcsum &= ~E1000_RXCSUM_IPOFL;
1701 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1703 /* No MRQ or RSS support for now */
1705 /* Set early receive threshold on appropriate hw */
1706 if ((hw->mac.type == e1000_ich9lan ||
1707 hw->mac.type == e1000_pch2lan ||
1708 hw->mac.type == e1000_ich10lan) &&
1709 dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1710 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1711 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
1712 E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
1715 if (hw->mac.type == e1000_pch2lan) {
1716 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1717 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
1719 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
1722 /* Setup the Receive Control Register. */
1723 if (dev->data->dev_conf.rxmode.hw_strip_crc)
1724 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1726 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1728 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1729 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1730 E1000_RCTL_RDMTS_HALF |
1731 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1733 /* Make sure VLAN Filters are off. */
1734 rctl &= ~E1000_RCTL_VFE;
1735 /* Don't store bad packets. */
1736 rctl &= ~E1000_RCTL_SBP;
1737 /* Legacy descriptor type. */
1738 rctl &= ~E1000_RCTL_DTYP_MASK;
1741 * Configure support of jumbo frames, if any.
1743 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1744 rctl |= E1000_RCTL_LPE;
1746 rctl &= ~E1000_RCTL_LPE;
1748 /* Enable Receives. */
1749 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1754 /*********************************************************************
1756 * Enable transmit unit.
1758 **********************************************************************/
1760 eth_em_tx_init(struct rte_eth_dev *dev)
1762 struct e1000_hw *hw;
1763 struct em_tx_queue *txq;
1768 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1770 /* Setup the Base and Length of the Tx Descriptor Rings. */
1771 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1774 txq = dev->data->tx_queues[i];
1775 bus_addr = txq->tx_ring_phys_addr;
1776 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1778 sizeof(*txq->tx_ring));
1779 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1780 (uint32_t)(bus_addr >> 32));
1781 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1783 /* Setup the HW Tx Head and Tail descriptor pointers. */
1784 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1785 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1787 /* Setup Transmit threshold registers. */
1788 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1790 * Bit 22 is reserved; on some models it should always be 0,
1791 * on others always 1.
1793 txdctl &= E1000_TXDCTL_COUNT_DESC;
1794 txdctl |= txq->pthresh & 0x3F;
1795 txdctl |= (txq->hthresh & 0x3F) << 8;
1796 txdctl |= (txq->wthresh & 0x3F) << 16;
1797 txdctl |= E1000_TXDCTL_GRAN;
1798 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1801 /* Program the Transmit Control Register. */
1802 tctl = E1000_READ_REG(hw, E1000_TCTL);
1803 tctl &= ~E1000_TCTL_CT;
1804 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1805 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1807 /* This write will effectively turn on the transmit unit. */
1808 E1000_WRITE_REG(hw, E1000_TCTL, tctl);