4 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/queue.h>
46 #include <rte_interrupts.h>
47 #include <rte_byteorder.h>
48 #include <rte_common.h>
50 #include <rte_debug.h>
52 #include <rte_memory.h>
53 #include <rte_memcpy.h>
54 #include <rte_memzone.h>
55 #include <rte_launch.h>
56 #include <rte_tailq.h>
58 #include <rte_per_lcore.h>
59 #include <rte_lcore.h>
60 #include <rte_atomic.h>
61 #include <rte_branch_prediction.h>
63 #include <rte_mempool.h>
64 #include <rte_malloc.h>
66 #include <rte_ether.h>
67 #include <rte_ethdev.h>
68 #include <rte_prefetch.h>
73 #include <rte_string_fns.h>
75 #include "e1000_logs.h"
76 #include "e1000/e1000_api.h"
77 #include "e1000_ethdev.h"
79 #define E1000_TXD_VLAN_SHIFT 16
81 #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
83 static inline struct rte_mbuf *
84 rte_rxmbuf_alloc(struct rte_mempool *mp)
88 m = __rte_mbuf_raw_alloc(mp);
89 __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
93 #define RTE_MBUF_DATA_DMA_ADDR(mb) \
94 (uint64_t) ((mb)->buf_physaddr + \
95 (uint64_t) ((char *)((mb)->pkt.data) - (char *)(mb)->buf_addr))
97 #define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
98 (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
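/*
 * Illustrative sketch (not part of the original driver): a hypothetical
 * function-style equivalent of RTE_MBUF_DATA_DMA_ADDR, shown only to make
 * the address arithmetic explicit.  For a freshly allocated mbuf, pkt.data
 * sits RTE_PKTMBUF_HEADROOM bytes past buf_addr, so this returns the same
 * value as RTE_MBUF_DATA_DMA_ADDR_DEFAULT.
 */
static inline uint64_t
em_mbuf_data_dma_addr_sketch(const struct rte_mbuf *mb)
{
	/* physical address of the buffer plus the offset of the data start */
	return (uint64_t) mb->buf_physaddr +
		(uint64_t) ((const char *) mb->pkt.data -
			    (const char *) mb->buf_addr);
}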
101 * Structure associated with each descriptor of the RX ring of an RX queue.
104 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
108 * Structure associated with each descriptor of the TX ring of a TX queue.
111 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
112 uint16_t next_id; /**< Index of next descriptor in ring. */
113 uint16_t last_id; /**< Index of last scattered descriptor. */
117 * Structure associated with each RX queue.
120 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
121 volatile struct e1000_rx_desc *rx_ring; /**< RX ring virtual address. */
122 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
123 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
124 struct em_rx_entry *sw_ring; /**< address of RX software ring. */
125 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
126 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
127 uint16_t nb_rx_desc; /**< number of RX descriptors. */
128 uint16_t rx_tail; /**< current value of RDT register. */
129 uint16_t nb_rx_hold; /**< number of held free RX desc. */
130 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
131 uint16_t queue_id; /**< RX queue index. */
132 uint8_t port_id; /**< Device port identifier. */
133 uint8_t pthresh; /**< Prefetch threshold register. */
134 uint8_t hthresh; /**< Host threshold register. */
135 uint8_t wthresh; /**< Write-back threshold register. */
136 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
140 * Hardware context number
143 EM_CTX_0 = 0, /**< CTX0 */
144 EM_CTX_NUM = 1, /**< CTX NUM */
148 * Structure to check if a new context descriptor needs to be built.
151 uint16_t flags; /**< ol_flags related to context build. */
152 uint32_t cmp_mask; /**< compare mask */
153 union rte_vlan_macip hdrlen; /**< L2 and L3 header lengths. */
157 * Structure associated with each TX queue.
160 volatile struct e1000_data_desc *tx_ring; /**< TX ring address */
161 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
162 struct em_tx_entry *sw_ring; /**< virtual address of SW ring. */
163 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
164 uint16_t nb_tx_desc; /**< number of TX descriptors. */
165 uint16_t tx_tail; /**< Current value of TDT register. */
166 uint16_t tx_free_thresh; /**< start freeing once this many TX descriptors are used. */
167 /** Number of TX descriptors to use before RS bit is set. */
168 uint16_t tx_rs_thresh;
169 /** Number of TX descriptors used since RS bit was set. */
171 /** Index to last TX descriptor to have been cleaned. */
172 uint16_t last_desc_cleaned;
173 /** Total number of TX descriptors ready to be allocated. */
175 uint16_t queue_id; /**< TX queue index. */
176 uint8_t port_id; /**< Device port identifier. */
177 uint8_t pthresh; /**< Prefetch threshold register. */
178 uint8_t hthresh; /**< Host threshold register. */
179 uint8_t wthresh; /**< Write-back threshold register. */
180 struct em_ctx_info ctx_cache;
181 /**< Hardware context history. */
185 #define RTE_PMD_USE_PREFETCH
188 #ifdef RTE_PMD_USE_PREFETCH
189 #define rte_em_prefetch(p) rte_prefetch0(p)
191 #define rte_em_prefetch(p) do {} while(0)
194 #ifdef RTE_PMD_PACKET_PREFETCH
195 #define rte_packet_prefetch(p) rte_prefetch1(p)
197 #define rte_packet_prefetch(p) do {} while(0)
200 #ifndef DEFAULT_TX_FREE_THRESH
201 #define DEFAULT_TX_FREE_THRESH 32
202 #endif /* DEFAULT_TX_FREE_THRESH */
204 #ifndef DEFAULT_TX_RS_THRESH
205 #define DEFAULT_TX_RS_THRESH 32
206 #endif /* DEFAULT_TX_RS_THRESH */
209 /*********************************************************************
213 **********************************************************************/
216 * Populates TX context descriptor.
219 em_set_xmit_ctx(struct em_tx_queue* txq,
220 volatile struct e1000_context_desc *ctx_txd,
222 union rte_vlan_macip hdrlen)
224 uint32_t cmp_mask, cmd_len;
225 uint16_t ipcse, l2len;
226 struct e1000_context_desc ctx;
229 cmd_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_C;
231 l2len = hdrlen.f.l2_len;
232 ipcse = l2len + hdrlen.f.l3_len;
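/*
 * Worked example (hypothetical values): for an IPv4/TCP frame with
 * l2_len = 14 and l3_len = 20, ipcss = 14, ipcso = 14 +
 * offsetof(struct ipv4_hdr, hdr_checksum) = 24 and ipcse is written as
 * 34 - 1 = 33; tucss = 34 and, for PKT_TX_TCP_CKSUM, tucso = 34 +
 * offsetof(struct tcp_hdr, cksum) = 50.
 */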
234 /* setup IPCS* fields */
235 ctx.lower_setup.ip_fields.ipcss = l2len;
236 ctx.lower_setup.ip_fields.ipcso = l2len +
237 offsetof(struct ipv4_hdr, hdr_checksum);
240 * When doing checksum or TCP segmentation with IPv6 headers,
241 * the IPCSE field should be set to 0.
243 if (flags & PKT_TX_IP_CKSUM) {
244 ctx.lower_setup.ip_fields.ipcse = rte_cpu_to_le_16(ipcse - 1);
245 cmd_len |= E1000_TXD_CMD_IP;
246 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
248 ctx.lower_setup.ip_fields.ipcse = 0;
251 /* setup TUCS* fields */
252 ctx.upper_setup.tcp_fields.tucss = ipcse;
253 ctx.upper_setup.tcp_fields.tucse = 0;
255 switch (flags & PKT_TX_L4_MASK) {
256 case PKT_TX_UDP_CKSUM:
257 ctx.upper_setup.tcp_fields.tucso = ipcse +
258 offsetof(struct udp_hdr, dgram_cksum);
259 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
261 case PKT_TX_TCP_CKSUM:
262 ctx.upper_setup.tcp_fields.tucso = ipcse +
263 offsetof(struct tcp_hdr, cksum);
264 cmd_len |= E1000_TXD_CMD_TCP;
265 cmp_mask |= TX_MACIP_LEN_CMP_MASK;
268 ctx.upper_setup.tcp_fields.tucso = 0;
271 ctx.cmd_and_length = rte_cpu_to_le_32(cmd_len);
272 ctx.tcp_seg_setup.data = 0;
276 txq->ctx_cache.flags = flags;
277 txq->ctx_cache.cmp_mask = cmp_mask;
278 txq->ctx_cache.hdrlen = hdrlen;
282 * Check which hardware context can be used. Use the existing match
283 * or create a new context descriptor.
285 static inline uint32_t
286 what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
287 union rte_vlan_macip hdrlen)
289 /* If it matches the currently cached context */
290 if (likely (txq->ctx_cache.flags == flags &&
291 ((txq->ctx_cache.hdrlen.data ^ hdrlen.data) &
292 txq->ctx_cache.cmp_mask) == 0))
299 /* Reset transmit descriptors after they have been used */
301 em_xmit_cleanup(struct em_tx_queue *txq)
303 struct em_tx_entry *sw_ring = txq->sw_ring;
304 volatile struct e1000_data_desc *txr = txq->tx_ring;
305 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
306 uint16_t nb_tx_desc = txq->nb_tx_desc;
307 uint16_t desc_to_clean_to;
308 uint16_t nb_tx_to_clean;
310 /* Determine the last descriptor needing to be cleaned */
311 desc_to_clean_to = last_desc_cleaned + txq->tx_rs_thresh;
312 if (desc_to_clean_to >= nb_tx_desc)
313 desc_to_clean_to = desc_to_clean_to - nb_tx_desc;
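/*
 * Worked example: with nb_tx_desc = 512, last_desc_cleaned = 500 and
 * tx_rs_thresh = 32, the sum is 532, which exceeds the ring size, so
 * desc_to_clean_to wraps around to 532 - 512 = 20.
 */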
315 /* Check to make sure the last descriptor to clean is done */
316 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
317 if (! (txr[desc_to_clean_to].upper.fields.status & E1000_TXD_STAT_DD))
319 PMD_TX_FREE_LOG(DEBUG,
320 "TX descriptor %4u is not done"
321 "(port=%d queue=%d)",
323 txq->port_id, txq->queue_id);
324 /* Failed to clean any descriptors, better luck next time */
328 /* Figure out how many descriptors will be cleaned */
329 if (last_desc_cleaned > desc_to_clean_to)
330 nb_tx_to_clean = ((nb_tx_desc - last_desc_cleaned) +
333 nb_tx_to_clean = desc_to_clean_to - last_desc_cleaned;
335 PMD_TX_FREE_LOG(DEBUG,
336 "Cleaning %4u TX descriptors: %4u to %4u "
337 "(port=%d queue=%d)",
338 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
339 txq->port_id, txq->queue_id);
342 * The last descriptor to clean is done, so that means all the
343 * descriptors from the last descriptor that was cleaned
344 * up to the last descriptor with the RS bit set
345 * are done. Only reset the threshold descriptor.
347 txr[desc_to_clean_to].upper.fields.status = 0;
349 /* Update the txq to reflect the last descriptor that was cleaned */
350 txq->last_desc_cleaned = desc_to_clean_to;
351 txq->nb_tx_free += nb_tx_to_clean;
357 static inline uint32_t
358 tx_desc_cksum_flags_to_upper(uint16_t ol_flags)
360 static const uint32_t l4_olinfo[2] = {0, E1000_TXD_POPTS_TXSM << 8};
361 static const uint32_t l3_olinfo[2] = {0, E1000_TXD_POPTS_IXSM << 8};
364 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
365 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
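/*
 * Worked example: indexing the lookup tables with a boolean avoids
 * branches.  A packet carrying PKT_TX_IP_CKSUM and PKT_TX_TCP_CKSUM
 * yields tmp = (E1000_TXD_POPTS_TXSM | E1000_TXD_POPTS_IXSM) << 8,
 * which the transmit path later ORs into the descriptor's upper word.
 */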
370 eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
373 struct em_tx_queue *txq;
374 struct em_tx_entry *sw_ring;
375 struct em_tx_entry *txe, *txn;
376 volatile struct e1000_data_desc *txr;
377 volatile struct e1000_data_desc *txd;
378 struct rte_mbuf *tx_pkt;
379 struct rte_mbuf *m_seg;
380 uint64_t buf_dma_addr;
382 uint32_t cmd_type_len;
392 union rte_vlan_macip hdrlen;
395 sw_ring = txq->sw_ring;
397 tx_id = txq->tx_tail;
398 txe = &sw_ring[tx_id];
400 /* Determine if the descriptor ring needs to be cleaned. */
401 if ((txq->nb_tx_desc - txq->nb_tx_free) > txq->tx_free_thresh) {
402 em_xmit_cleanup(txq);
406 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
410 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
413 * Determine how many (if any) context descriptors
414 * are needed for offload functionality.
416 ol_flags = tx_pkt->ol_flags;
418 /* If hardware offload required */
419 tx_ol_req = ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK);
421 hdrlen = tx_pkt->pkt.vlan_macip;
422 /* Decide whether a new context must be built or the existing one reused. */
423 ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
425 /* Only allocate a context descriptor if required. */
426 new_ctx = (ctx == EM_CTX_NUM);
430 * Keep track of how many descriptors are used this iteration.
431 * This will always be the number of segments plus the number of
432 * context descriptors required to transmit the packet.
434 nb_used = tx_pkt->pkt.nb_segs + new_ctx;
437 * The number of descriptors that must be allocated for a
438 * packet is the number of segments of that packet, plus 1
439 * Context Descriptor for the hardware offload, if any.
440 * Determine the last TX descriptor to allocate in the TX ring
441 * for the packet, starting from the current position (tx_id)
444 tx_last = (uint16_t) (tx_id + nb_used - 1);
447 if (tx_last >= txq->nb_tx_desc)
448 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
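/*
 * Worked example: with nb_tx_desc = 256, tx_id = 254 and a 3-segment
 * packet that also needs a context descriptor, nb_used = 4 and
 * tx_last wraps from 257 to 257 - 256 = 1.
 */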
450 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
451 " tx_first=%u tx_last=%u\n",
452 (unsigned) txq->port_id,
453 (unsigned) txq->queue_id,
454 (unsigned) tx_pkt->pkt.pkt_len,
459 * Make sure there are enough TX descriptors available to
460 * transmit the entire packet.
461 * nb_used better be less than or equal to txq->tx_rs_thresh
463 while (unlikely (nb_used > txq->nb_tx_free)) {
464 PMD_TX_FREE_LOG(DEBUG,
465 "Not enough free TX descriptors "
466 "nb_used=%4u nb_free=%4u "
467 "(port=%d queue=%d)",
468 nb_used, txq->nb_tx_free,
469 txq->port_id, txq->queue_id);
471 if (em_xmit_cleanup(txq) != 0) {
472 /* Could not clean any descriptors */
480 * By now there are enough free TX descriptors to transmit
485 * Set common flags of all TX Data Descriptors.
487 * The following bits must be set in all Data Descriptors:
488 * - E1000_TXD_DTYP_DATA
489 * - E1000_TXD_DTYP_DEXT
491 * The following bits must be set in the first Data Descriptor
492 * and are ignored in the other ones:
493 * - E1000_TXD_POPTS_IXSM
494 * - E1000_TXD_POPTS_TXSM
496 * The following bits must be set in the last Data Descriptor
497 * and are ignored in the other ones:
498 * - E1000_TXD_CMD_VLE
499 * - E1000_TXD_CMD_IFCS
501 * The following bits must only be set in the last Data
503 * - E1000_TXD_CMD_EOP
505 * The following bits can be set in any Data Descriptor, but
506 * are only set in the last Data Descriptor:
509 cmd_type_len = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
513 /* Set VLAN Tag offload fields. */
514 if (ol_flags & PKT_TX_VLAN_PKT) {
515 cmd_type_len |= E1000_TXD_CMD_VLE;
516 popts_spec = tx_pkt->pkt.vlan_macip.f.vlan_tci <<
517 E1000_TXD_VLAN_SHIFT;
522 * Setup the TX Context Descriptor if required
525 volatile struct e1000_context_desc *ctx_txd;
527 ctx_txd = (volatile struct e1000_context_desc *)
530 txn = &sw_ring[txe->next_id];
531 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
533 if (txe->mbuf != NULL) {
534 rte_pktmbuf_free_seg(txe->mbuf);
538 em_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
541 txe->last_id = tx_last;
542 tx_id = txe->next_id;
547 * Set up the TX Data Descriptor.
548 * This path is taken whether a new context descriptor
549 * was built or an existing one is reused.
551 popts_spec |= tx_desc_cksum_flags_to_upper(ol_flags);
557 txn = &sw_ring[txe->next_id];
559 if (txe->mbuf != NULL)
560 rte_pktmbuf_free_seg(txe->mbuf);
564 * Set up Transmit Data Descriptor.
566 slen = m_seg->pkt.data_len;
567 buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
569 txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
570 txd->lower.data = rte_cpu_to_le_32(cmd_type_len | slen);
571 txd->upper.data = rte_cpu_to_le_32(popts_spec);
573 txe->last_id = tx_last;
574 tx_id = txe->next_id;
576 m_seg = m_seg->pkt.next;
577 } while (m_seg != NULL);
580 * The last packet data descriptor needs End Of Packet (EOP)
582 cmd_type_len |= E1000_TXD_CMD_EOP;
583 txq->nb_tx_used += nb_used;
584 txq->nb_tx_free -= nb_used;
586 /* Set RS bit only on threshold packets' last descriptor */
587 if (txq->nb_tx_used >= txq->tx_rs_thresh) {
588 PMD_TX_FREE_LOG(DEBUG,
589 "Setting RS bit on TXD id="
590 "%4u (port=%d queue=%d)",
591 tx_last, txq->port_id, txq->queue_id);
593 cmd_type_len |= E1000_TXD_CMD_RS;
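/*
 * Worked example: with tx_rs_thresh = 32, the RS bit is requested
 * roughly once every 32 descriptors, so the NIC writes back DD status
 * in batches and em_xmit_cleanup() can later reclaim up to
 * tx_rs_thresh descriptors in a single pass.
 */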
595 /* Update txq RS bit counters */
598 txd->lower.data |= rte_cpu_to_le_32(cmd_type_len);
604 * Set the Transmit Descriptor Tail (TDT)
606 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
607 (unsigned) txq->port_id, (unsigned) txq->queue_id,
608 (unsigned) tx_id, (unsigned) nb_tx);
609 E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
610 txq->tx_tail = tx_id;
615 /*********************************************************************
619 **********************************************************************/
621 static inline uint16_t
622 rx_desc_status_to_pkt_flags(uint32_t rx_status)
626 /* Check if VLAN present */
627 pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ?
633 static inline uint16_t
634 rx_desc_error_to_pkt_flags(uint32_t rx_error)
636 uint16_t pkt_flags = 0;
638 if (rx_error & E1000_RXD_ERR_IPE)
639 pkt_flags |= PKT_RX_IP_CKSUM_BAD;
640 if (rx_error & E1000_RXD_ERR_TCPE)
641 pkt_flags |= PKT_RX_L4_CKSUM_BAD;
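/*
 * Worked example: a descriptor reporting both E1000_RXD_ERR_IPE and
 * E1000_RXD_ERR_TCPE results in the returned mbuf carrying
 * PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD in ol_flags.
 */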
646 eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
649 volatile struct e1000_rx_desc *rx_ring;
650 volatile struct e1000_rx_desc *rxdp;
651 struct em_rx_queue *rxq;
652 struct em_rx_entry *sw_ring;
653 struct em_rx_entry *rxe;
654 struct rte_mbuf *rxm;
655 struct rte_mbuf *nmb;
656 struct e1000_rx_desc rxd;
668 rx_id = rxq->rx_tail;
669 rx_ring = rxq->rx_ring;
670 sw_ring = rxq->sw_ring;
671 while (nb_rx < nb_pkts) {
673 * The order of operations here is important as the DD status
674 * bit must not be read after any other descriptor fields.
675 * rx_ring and rxdp are pointing to volatile data so the order
676 * of accesses cannot be reordered by the compiler. If they were
677 * not volatile, they could be reordered which could lead to
678 * using invalid descriptor fields when read from rxd.
680 rxdp = &rx_ring[rx_id];
681 status = rxdp->status;
682 if (! (status & E1000_RXD_STAT_DD))
689 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
690 * likely to be invalid and to be dropped by the various
691 * validation checks performed by the network stack.
693 * Allocate a new mbuf to replenish the RX ring descriptor.
694 * If the allocation fails:
695 * - arrange for that RX descriptor to be the first one
696 * being parsed the next time the receive function is
697 * invoked [on the same queue].
699 * - Stop parsing the RX ring and return immediately.
701 * This policy does not drop the packet received in the RX
702 * descriptor for which the allocation of a new mbuf failed.
703 * Thus, it allows that packet to be later retrieved if
704 * mbufs have been freed in the meantime.
705 * As a side effect, holding RX descriptors instead of
706 * systematically giving them back to the NIC may lead to
707 * RX ring exhaustion situations.
708 * However, the NIC can gracefully prevent such situations
709 * from happening by sending specific "back-pressure" flow control
710 * frames to its peer(s).
712 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
713 "status=0x%x pkt_len=%u\n",
714 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
715 (unsigned) rx_id, (unsigned) status,
716 (unsigned) rte_le_to_cpu_16(rxd.length));
718 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
720 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
722 (unsigned) rxq->port_id,
723 (unsigned) rxq->queue_id);
724 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
729 rxe = &sw_ring[rx_id];
731 if (rx_id == rxq->nb_rx_desc)
734 /* Prefetch next mbuf while processing current one. */
735 rte_em_prefetch(sw_ring[rx_id].mbuf);
738 * When next RX descriptor is on a cache-line boundary,
739 * prefetch the next 4 RX descriptors and the next 8 pointers
742 if ((rx_id & 0x3) == 0) {
743 rte_em_prefetch(&rx_ring[rx_id]);
744 rte_em_prefetch(&sw_ring[rx_id]);
747 /* Rearm RXD: attach new mbuf and reset status to zero. */
752 rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
753 rxdp->buffer_addr = dma_addr;
757 * Initialize the returned mbuf.
758 * 1) setup generic mbuf fields:
759 * - number of segments,
762 * - RX port identifier.
763 * 2) integrate hardware offload data, if any:
765 * - IP checksum flag,
766 * - VLAN TCI, if any,
769 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.length) -
771 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
772 rte_packet_prefetch(rxm->pkt.data);
773 rxm->pkt.nb_segs = 1;
774 rxm->pkt.next = NULL;
775 rxm->pkt.pkt_len = pkt_len;
776 rxm->pkt.data_len = pkt_len;
777 rxm->pkt.in_port = rxq->port_id;
779 rxm->ol_flags = rx_desc_status_to_pkt_flags(status);
780 rxm->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors);
782 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
783 rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
786 * Store the mbuf address into the next entry of the array
787 * of returned packets.
789 rx_pkts[nb_rx++] = rxm;
791 rxq->rx_tail = rx_id;
794 * If the number of free RX descriptors is greater than the RX free
795 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
797 * Update the RDT with the value of the last processed RX descriptor
798 * minus 1, to guarantee that the RDT register is never equal to the
799 * RDH register, which creates a "full" ring situation from the
800 * hardware point of view...
802 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
803 if (nb_hold > rxq->rx_free_thresh) {
804 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
805 "nb_hold=%u nb_rx=%u\n",
806 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
807 (unsigned) rx_id, (unsigned) nb_hold,
809 rx_id = (uint16_t) ((rx_id == 0) ?
810 (rxq->nb_rx_desc - 1) : (rx_id - 1));
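/*
 * Worked example: if the next descriptor to probe is rx_id = 0, the
 * RDT is written with nb_rx_desc - 1, so the tail never catches up
 * with RDH and the ring is never seen as full by the hardware.
 */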
811 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
814 rxq->nb_rx_hold = nb_hold;
819 eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
822 struct em_rx_queue *rxq;
823 volatile struct e1000_rx_desc *rx_ring;
824 volatile struct e1000_rx_desc *rxdp;
825 struct em_rx_entry *sw_ring;
826 struct em_rx_entry *rxe;
827 struct rte_mbuf *first_seg;
828 struct rte_mbuf *last_seg;
829 struct rte_mbuf *rxm;
830 struct rte_mbuf *nmb;
831 struct e1000_rx_desc rxd;
832 uint64_t dma; /* Physical address of mbuf data buffer */
843 rx_id = rxq->rx_tail;
844 rx_ring = rxq->rx_ring;
845 sw_ring = rxq->sw_ring;
848 * Retrieve RX context of current packet, if any.
850 first_seg = rxq->pkt_first_seg;
851 last_seg = rxq->pkt_last_seg;
853 while (nb_rx < nb_pkts) {
856 * The order of operations here is important as the DD status
857 * bit must not be read after any other descriptor fields.
858 * rx_ring and rxdp are pointing to volatile data so the order
859 * of accesses cannot be reordered by the compiler. If they were
860 * not volatile, they could be reordered which could lead to
861 * using invalid descriptor fields when read from rxd.
863 rxdp = &rx_ring[rx_id];
864 status = rxdp->status;
865 if (! (status & E1000_RXD_STAT_DD))
872 * Allocate a new mbuf to replenish the RX ring descriptor.
873 * If the allocation fails:
874 * - arrange for that RX descriptor to be the first one
875 * being parsed the next time the receive function is
876 * invoked [on the same queue].
878 * - Stop parsing the RX ring and return immediately.
880 * This policy does not drop the packet received in the RX
881 * descriptor for which the allocation of a new mbuf failed.
882 * Thus, it allows that packet to be later retrieved if
883 * mbufs have been freed in the meantime.
884 * As a side effect, holding RX descriptors instead of
885 * systematically giving them back to the NIC may lead to
886 * RX ring exhaustion situations.
887 * However, the NIC can gracefully prevent such situations
888 * to happen by sending specific "back-pressure" flow control
889 * frames to its peer(s).
891 PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
892 "status=0x%x data_len=%u\n",
893 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
894 (unsigned) rx_id, (unsigned) status,
895 (unsigned) rte_le_to_cpu_16(rxd.length));
897 nmb = rte_rxmbuf_alloc(rxq->mb_pool);
899 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
900 "queue_id=%u\n", (unsigned) rxq->port_id,
901 (unsigned) rxq->queue_id);
902 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
907 rxe = &sw_ring[rx_id];
909 if (rx_id == rxq->nb_rx_desc)
912 /* Prefetch next mbuf while processing current one. */
913 rte_em_prefetch(sw_ring[rx_id].mbuf);
916 * When next RX descriptor is on a cache-line boundary,
917 * prefetch the next 4 RX descriptors and the next 8 pointers
920 if ((rx_id & 0x3) == 0) {
921 rte_em_prefetch(&rx_ring[rx_id]);
922 rte_em_prefetch(&sw_ring[rx_id]);
926 * Update RX descriptor with the physical address of the new
927 * data buffer of the new allocated mbuf.
931 dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
932 rxdp->buffer_addr = dma;
936 * Set data length & data buffer address of mbuf.
938 data_len = rte_le_to_cpu_16(rxd.length);
939 rxm->pkt.data_len = data_len;
940 rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
943 * If this is the first buffer of the received packet,
944 * set the pointer to the first mbuf of the packet and
945 * initialize its context.
946 * Otherwise, update the total length and the number of segments
947 * of the current scattered packet, and update the pointer to
948 * the last mbuf of the current packet.
950 if (first_seg == NULL) {
952 first_seg->pkt.pkt_len = data_len;
953 first_seg->pkt.nb_segs = 1;
955 first_seg->pkt.pkt_len += data_len;
956 first_seg->pkt.nb_segs++;
957 last_seg->pkt.next = rxm;
961 * If this is not the last buffer of the received packet,
962 * update the pointer to the last mbuf of the current scattered
963 * packet and continue to parse the RX ring.
965 if (! (status & E1000_RXD_STAT_EOP)) {
971 * This is the last buffer of the received packet.
972 * If the CRC is not stripped by the hardware:
973 * - Subtract the CRC length from the total packet length.
974 * - If the last buffer only contains the whole CRC or a part
975 * of it, free the mbuf associated with the last buffer.
976 * If part of the CRC is also contained in the previous
977 * mbuf, subtract the length of that CRC part from the
978 * data length of the previous mbuf.
980 rxm->pkt.next = NULL;
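/*
 * Worked example: with ETHER_CRC_LEN = 4, if the final segment holds
 * only data_len = 2 bytes, it contains nothing but part of the CRC:
 * that segment is freed, nb_segs is decremented and the remaining
 * 4 - 2 = 2 CRC bytes are subtracted from the previous segment's
 * data_len.
 */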
981 if (unlikely(rxq->crc_len > 0)) {
982 first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
983 if (data_len <= ETHER_CRC_LEN) {
984 rte_pktmbuf_free_seg(rxm);
985 first_seg->pkt.nb_segs--;
986 last_seg->pkt.data_len = (uint16_t)
987 (last_seg->pkt.data_len -
988 (ETHER_CRC_LEN - data_len));
989 last_seg->pkt.next = NULL;
992 (uint16_t) (data_len - ETHER_CRC_LEN);
996 * Initialize the first mbuf of the returned packet:
997 * - RX port identifier,
998 * - hardware offload data, if any:
999 * - IP checksum flag,
1002 first_seg->pkt.in_port = rxq->port_id;
1004 first_seg->ol_flags = rx_desc_status_to_pkt_flags(status);
1005 first_seg->ol_flags |= rx_desc_error_to_pkt_flags(rxd.errors);
1007 /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
1008 rxm->pkt.vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
1010 /* Prefetch data of first segment, if configured to do so. */
1011 rte_packet_prefetch(first_seg->pkt.data);
1014 * Store the mbuf address into the next entry of the array
1015 * of returned packets.
1017 rx_pkts[nb_rx++] = first_seg;
1020 * Setup receipt context for a new packet.
1026 * Record index of the next RX descriptor to probe.
1028 rxq->rx_tail = rx_id;
1031 * Save receive context.
1033 rxq->pkt_first_seg = first_seg;
1034 rxq->pkt_last_seg = last_seg;
1037 * If the number of free RX descriptors is greater than the RX free
1038 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1040 * Update the RDT with the value of the last processed RX descriptor
1041 * minus 1, to guarantee that the RDT register is never equal to the
1042 * RDH register, which creates a "full" ring situation from the
1043 * hardware point of view...
1045 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1046 if (nb_hold > rxq->rx_free_thresh) {
1047 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1048 "nb_hold=%u nb_rx=%u\n",
1049 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1050 (unsigned) rx_id, (unsigned) nb_hold,
1052 rx_id = (uint16_t) ((rx_id == 0) ?
1053 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1054 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1057 rxq->nb_rx_hold = nb_hold;
1062 * Rings setup and release.
1064 * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be a
1065 * multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary instead.
1066 * This also optimizes cache-line usage:
1067 * the hardware supports cache-line sizes up to 128 bytes.
1069 #define EM_ALIGN 128
1072 * Maximum number of Ring Descriptors.
1074 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1075 * descriptors should meet the following condition:
1076 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
1078 #define EM_MIN_RING_DESC 32
1079 #define EM_MAX_RING_DESC 4096
1081 #define EM_MAX_BUF_SIZE 16384
1082 #define EM_RCTL_FLXBUF_STEP 1024
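/*
 * Illustrative sketch (not in the original driver), assuming the usual
 * 16-byte legacy e1000 descriptors: EM_MIN_RING_DESC * 16 = 512 and
 * EM_MAX_RING_DESC * 16 = 65536 are both multiples of EM_ALIGN (128),
 * so any descriptor count in that range that is a multiple of 8
 * satisfies the condition above.  A hypothetical validity check:
 */
static inline int
em_ring_desc_count_valid_sketch(uint16_t nb_desc)
{
	return (nb_desc >= EM_MIN_RING_DESC && nb_desc <= EM_MAX_RING_DESC &&
		(nb_desc * sizeof(struct e1000_rx_desc)) % EM_ALIGN == 0);
}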
1084 static const struct rte_memzone *
1085 ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
1086 uint16_t queue_id, uint32_t ring_size, int socket_id)
1088 const struct rte_memzone *mz;
1089 char z_name[RTE_MEMZONE_NAMESIZE];
1091 rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
1092 dev->driver->pci_drv.name, ring_name, dev->data->port_id,
1095 if ((mz = rte_memzone_lookup(z_name)) != 0)
1098 return rte_memzone_reserve(z_name, (uint64_t) ring_size, socket_id, 0);
1102 em_tx_queue_release_mbufs(struct em_tx_queue *txq)
1106 if (txq->sw_ring != NULL) {
1107 for (i = 0; i != txq->nb_tx_desc; i++) {
1108 if (txq->sw_ring[i].mbuf != NULL) {
1109 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1110 txq->sw_ring[i].mbuf = NULL;
1117 em_tx_queue_release(struct em_tx_queue *txq)
1120 em_tx_queue_release_mbufs(txq);
1121 rte_free(txq->sw_ring);
1127 eth_em_tx_queue_release(void *txq)
1129 em_tx_queue_release(txq);
1132 /* (Re)set dynamic em_tx_queue fields to defaults */
1134 em_reset_tx_queue(struct em_tx_queue *txq)
1136 uint16_t i, nb_desc, prev;
1137 static const struct e1000_data_desc txd_init = {
1138 .upper.fields = {.status = E1000_TXD_STAT_DD},
1141 nb_desc = txq->nb_tx_desc;
1143 /* Initialize ring entries */
1145 prev = (uint16_t) (nb_desc - 1);
1147 for (i = 0; i < nb_desc; i++) {
1148 txq->tx_ring[i] = txd_init;
1149 txq->sw_ring[i].mbuf = NULL;
1150 txq->sw_ring[i].last_id = i;
1151 txq->sw_ring[prev].next_id = i;
1156 * Always allow 1 descriptor to be unallocated to avoid
1157 * a H/W race condition.
1159 txq->nb_tx_free = (uint16_t)(nb_desc - 1);
1160 txq->last_desc_cleaned = (uint16_t)(nb_desc - 1);
1161 txq->nb_tx_used = 0;
1164 memset((void*)&txq->ctx_cache, 0, sizeof (txq->ctx_cache));
1168 eth_em_tx_queue_setup(struct rte_eth_dev *dev,
1171 unsigned int socket_id,
1172 const struct rte_eth_txconf *tx_conf)
1174 const struct rte_memzone *tz;
1175 struct em_tx_queue *txq;
1176 struct e1000_hw *hw;
1178 uint16_t tx_rs_thresh, tx_free_thresh;
1180 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1183 * Validate number of transmit descriptors.
1184 * It must not exceed hardware maximum, and must be multiple
1187 if (((nb_desc * sizeof(*txq->tx_ring)) % EM_ALIGN) != 0 ||
1188 (nb_desc > EM_MAX_RING_DESC) ||
1189 (nb_desc < EM_MIN_RING_DESC)) {
1193 tx_free_thresh = tx_conf->tx_free_thresh;
1194 if (tx_free_thresh == 0)
1195 tx_free_thresh = RTE_MIN(nb_desc / 4, DEFAULT_TX_FREE_THRESH);
1197 tx_rs_thresh = tx_conf->tx_rs_thresh;
1198 if (tx_rs_thresh == 0)
1199 tx_rs_thresh = RTE_MIN(tx_free_thresh, DEFAULT_TX_RS_THRESH);
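/*
 * Worked example: for nb_desc = 512 with both thresholds left at 0 in
 * tx_conf, tx_free_thresh defaults to RTE_MIN(512 / 4, 32) = 32 and
 * tx_rs_thresh to RTE_MIN(32, 32) = 32.
 */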
1201 if (tx_free_thresh >= (nb_desc - 3)) {
1203 "tx_free_thresh must be less than the "
1204 "number of TX descriptors minus 3. "
1205 "(tx_free_thresh=%u port=%d queue=%d)\n",
1206 tx_free_thresh, dev->data->port_id, queue_idx);
1209 if (tx_rs_thresh > tx_free_thresh) {
1211 "tx_rs_thresh must be less than or equal to "
1213 "(tx_free_thresh=%u tx_rs_thresh=%u "
1214 "port=%d queue=%d)\n",
1215 tx_free_thresh, tx_rs_thresh, dev->data->port_id,
1221 * If tx_rs_thresh is greater than 1, then TX WTHRESH should be
1222 * set to 0. If WTHRESH is greater than zero, the RS bit is ignored
1223 * by the NIC and all descriptors are written back after the NIC
1224 * accumulates WTHRESH descriptors.
1226 if (tx_conf->tx_thresh.wthresh != 0 && tx_rs_thresh != 1) {
1228 "TX WTHRESH must be set to 0 if "
1229 "tx_rs_thresh is greater than 1. "
1230 "(tx_rs_thresh=%u port=%d queue=%d)\n",
1231 tx_rs_thresh, dev->data->port_id, queue_idx);
1235 /* Free memory prior to re-allocation if needed... */
1236 if (dev->data->tx_queues[queue_idx] != NULL) {
1237 em_tx_queue_release(dev->data->tx_queues[queue_idx]);
1238 dev->data->tx_queues[queue_idx] = NULL;
1242 * Allocate TX ring hardware descriptors. A memzone large enough to
1243 * handle the maximum ring size is allocated in order to allow for
1244 * resizing in later calls to the queue setup function.
1246 tsize = sizeof (txq->tx_ring[0]) * EM_MAX_RING_DESC;
1247 if ((tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx, tsize,
1248 socket_id)) == NULL)
1251 /* Allocate the tx queue data structure. */
1252 if ((txq = rte_zmalloc("ethdev TX queue", sizeof(*txq),
1253 CACHE_LINE_SIZE)) == NULL)
1256 /* Allocate software ring */
1257 if ((txq->sw_ring = rte_zmalloc("txq->sw_ring",
1258 sizeof(txq->sw_ring[0]) * nb_desc,
1259 CACHE_LINE_SIZE)) == NULL) {
1260 em_tx_queue_release(txq);
1264 txq->nb_tx_desc = nb_desc;
1265 txq->tx_free_thresh = tx_free_thresh;
1266 txq->tx_rs_thresh = tx_rs_thresh;
1267 txq->pthresh = tx_conf->tx_thresh.pthresh;
1268 txq->hthresh = tx_conf->tx_thresh.hthresh;
1269 txq->wthresh = tx_conf->tx_thresh.wthresh;
1270 txq->queue_id = queue_idx;
1271 txq->port_id = dev->data->port_id;
1273 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
1274 txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
1275 txq->tx_ring = (struct e1000_data_desc *) tz->addr;
1277 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1278 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1280 em_reset_tx_queue(txq);
1282 dev->data->tx_queues[queue_idx] = txq;
1287 em_rx_queue_release_mbufs(struct em_rx_queue *rxq)
1291 if (rxq->sw_ring != NULL) {
1292 for (i = 0; i != rxq->nb_rx_desc; i++) {
1293 if (rxq->sw_ring[i].mbuf != NULL) {
1294 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1295 rxq->sw_ring[i].mbuf = NULL;
1302 em_rx_queue_release(struct em_rx_queue *rxq)
1305 em_rx_queue_release_mbufs(rxq);
1306 rte_free(rxq->sw_ring);
1312 eth_em_rx_queue_release(void *rxq)
1314 em_rx_queue_release(rxq);
1317 /* Reset dynamic em_rx_queue fields back to defaults */
1319 em_reset_rx_queue(struct em_rx_queue *rxq)
1322 rxq->nb_rx_hold = 0;
1323 rxq->pkt_first_seg = NULL;
1324 rxq->pkt_last_seg = NULL;
1328 eth_em_rx_queue_setup(struct rte_eth_dev *dev,
1331 unsigned int socket_id,
1332 const struct rte_eth_rxconf *rx_conf,
1333 struct rte_mempool *mp)
1335 const struct rte_memzone *rz;
1336 struct em_rx_queue *rxq;
1337 struct e1000_hw *hw;
1340 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1343 * Validate number of receive descriptors.
1344 * It must not exceed hardware maximum, and must be multiple
1347 if (((nb_desc * sizeof(rxq->rx_ring[0])) % EM_ALIGN) != 0 ||
1348 (nb_desc > EM_MAX_RING_DESC) ||
1349 (nb_desc < EM_MIN_RING_DESC)) {
1354 * EM devices don't support drop_en functionality
1356 if (rx_conf->rx_drop_en) {
1357 RTE_LOG(ERR, PMD, "drop_en functionality not supported by device\n");
1361 /* Free memory prior to re-allocation if needed. */
1362 if (dev->data->rx_queues[queue_idx] != NULL) {
1363 em_rx_queue_release(dev->data->rx_queues[queue_idx]);
1364 dev->data->rx_queues[queue_idx] = NULL;
1367 /* Allocate RX ring for the max possible number of hardware descriptors. */
1368 rsize = sizeof (rxq->rx_ring[0]) * EM_MAX_RING_DESC;
1369 if ((rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, rsize,
1370 socket_id)) == NULL)
1373 /* Allocate the RX queue data structure. */
1374 if ((rxq = rte_zmalloc("ethdev RX queue", sizeof(*rxq),
1375 CACHE_LINE_SIZE)) == NULL)
1378 /* Allocate software ring. */
1379 if ((rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1380 sizeof (rxq->sw_ring[0]) * nb_desc,
1381 CACHE_LINE_SIZE)) == NULL) {
1382 em_rx_queue_release(rxq);
1387 rxq->nb_rx_desc = nb_desc;
1388 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1389 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1390 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1391 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1392 rxq->queue_id = queue_idx;
1393 rxq->port_id = dev->data->port_id;
1394 rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
1397 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
1398 rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
1399 rxq->rx_ring = (struct e1000_rx_desc *) rz->addr;
1401 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
1402 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1404 dev->data->rx_queues[queue_idx] = rxq;
1405 em_reset_rx_queue(rxq);
1411 em_dev_clear_queues(struct rte_eth_dev *dev)
1414 struct em_tx_queue *txq;
1415 struct em_rx_queue *rxq;
1417 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1418 txq = dev->data->tx_queues[i];
1420 em_tx_queue_release_mbufs(txq);
1421 em_reset_tx_queue(txq);
1425 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1426 rxq = dev->data->rx_queues[i];
1428 em_rx_queue_release_mbufs(rxq);
1429 em_reset_rx_queue(rxq);
1435 * Takes the RX buffer size as an input/output parameter.
1436 * Returns (BSIZE | BSEX | FLXBUF) fields of RCTL register.
1439 em_rctl_bsize(enum e1000_mac_type hwtyp, uint32_t *bufsz)
1442 * For BSIZE & BSEX all configurable sizes are:
1443 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
1444 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
1445 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
1446 * 2048: rctl |= E1000_RCTL_SZ_2048;
1447 * 1024: rctl |= E1000_RCTL_SZ_1024;
1448 * 512: rctl |= E1000_RCTL_SZ_512;
1449 * 256: rctl |= E1000_RCTL_SZ_256;
1451 static const struct {
1454 } bufsz_to_rctl[] = {
1455 {16384, (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX)},
1456 {8192, (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX)},
1457 {4096, (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX)},
1458 {2048, E1000_RCTL_SZ_2048},
1459 {1024, E1000_RCTL_SZ_1024},
1460 {512, E1000_RCTL_SZ_512},
1461 {256, E1000_RCTL_SZ_256},
1465 uint32_t rctl_bsize;
1467 rctl_bsize = *bufsz;
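/*
 * Worked example (hypothetical sizes): a standard 2 KB mbuf data room
 * gives *bufsz = 2048 on entry; the table lookup below selects
 * E1000_RCTL_SZ_2048 and leaves *bufsz at 2048.  A size such as 3000
 * would map to the largest entry not exceeding it (2048) and *bufsz
 * would be lowered accordingly.
 */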
1470 * Starting from 82571 it is possible to specify RX buffer size
1471 * by RCTL.FLXBUF. When this field is different from zero, the
1472 * RX buffer size = RCTL.FLXBUF * 1K
1473 * (e.g. it is possible to specify an RX buffer size of 1, 2, ..., 15 KB).
1474 * It works fine on real HW, but for some reason doesn't work
1475 * on VMware-emulated 82574L.
1476 * So for now, always use BSIZE/BSEX to set up the RX buffer size.
1477 * If you don't plan to use it on VMware emulated 82574L and
1478 * would like to specify RX buffer size in 1K granularity,
1479 * uncomment the following lines:
1480 * ***************************************************************
1481 * if (hwtyp >= e1000_82571 && hwtyp <= e1000_82574 &&
1482 * rctl_bsize >= EM_RCTL_FLXBUF_STEP) {
1483 * rctl_bsize /= EM_RCTL_FLXBUF_STEP;
1484 * *bufsz = rctl_bsize;
1485 * return (rctl_bsize << E1000_RCTL_FLXBUF_SHIFT &
1486 * E1000_RCTL_FLXBUF_MASK);
1488 * ***************************************************************
1491 for (i = 0; i != sizeof(bufsz_to_rctl) / sizeof(bufsz_to_rctl[0]);
1493 if (rctl_bsize >= bufsz_to_rctl[i].bufsz) {
1494 *bufsz = bufsz_to_rctl[i].bufsz;
1495 return (bufsz_to_rctl[i].rctl);
1499 /* Should never happen. */
1504 em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq)
1506 struct em_rx_entry *rxe = rxq->sw_ring;
1509 static const struct e1000_rx_desc rxd_init = {
1513 /* Initialize software ring entries */
1514 for (i = 0; i < rxq->nb_rx_desc; i++) {
1515 volatile struct e1000_rx_desc *rxd;
1516 struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
1519 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
1520 "queue_id=%hu\n", rxq->queue_id);
1521 em_rx_queue_release(rxq);
1525 dma_addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
1527 /* Clear HW ring memory */
1528 rxq->rx_ring[i] = rxd_init;
1530 rxd = &rxq->rx_ring[i];
1531 rxd->buffer_addr = dma_addr;
1538 /*********************************************************************
1540 * Enable receive unit.
1542 **********************************************************************/
1544 eth_em_rx_init(struct rte_eth_dev *dev)
1546 struct e1000_hw *hw;
1547 struct em_rx_queue *rxq;
1551 uint32_t rctl_bsize;
1555 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1558 * Make sure receives are disabled while setting
1559 * up the descriptor ring.
1561 rctl = E1000_READ_REG(hw, E1000_RCTL);
1562 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
1564 rfctl = E1000_READ_REG(hw, E1000_RFCTL);
1566 /* Disable extended descriptor type. */
1567 rfctl &= ~E1000_RFCTL_EXTEN;
1568 /* Disable accelerated acknowledge */
1569 if (hw->mac.type == e1000_82574)
1570 rfctl |= E1000_RFCTL_ACK_DIS;
1572 E1000_WRITE_REG(hw, E1000_RFCTL, rfctl);
1575 * XXX TEMPORARY WORKAROUND: on some systems with 82573
1576 * long latencies are observed, like Lenovo X60. This
1577 * change eliminates the problem, but since having positive
1578 * values in RDTR is a known source of problems on other
1579 * platforms another solution is being sought.
1581 if (hw->mac.type == e1000_82573)
1582 E1000_WRITE_REG(hw, E1000_RDTR, 0x20);
1584 dev->rx_pkt_burst = (eth_rx_burst_t)eth_em_recv_pkts;
1586 /* Determine RX bufsize. */
1587 rctl_bsize = EM_MAX_BUF_SIZE;
1588 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1589 struct rte_pktmbuf_pool_private *mbp_priv;
1592 rxq = dev->data->rx_queues[i];
1593 mbp_priv = rte_mempool_get_priv(rxq->mb_pool);
1594 buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;
1595 rctl_bsize = RTE_MIN(rctl_bsize, buf_size);
1598 rctl |= em_rctl_bsize(hw->mac.type, &rctl_bsize);
1600 /* Configure and enable each RX queue. */
1601 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1605 rxq = dev->data->rx_queues[i];
1607 /* Allocate buffers for descriptor rings and setup queue */
1608 ret = em_alloc_rx_queue_mbufs(rxq);
1613 * Reset crc_len in case it was changed after queue setup by a
1617 (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
1620 bus_addr = rxq->rx_ring_phys_addr;
1621 E1000_WRITE_REG(hw, E1000_RDLEN(i),
1623 sizeof(*rxq->rx_ring));
1624 E1000_WRITE_REG(hw, E1000_RDBAH(i),
1625 (uint32_t)(bus_addr >> 32));
1626 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
1628 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
1629 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
1631 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1632 rxdctl &= 0xFE000000;
1633 rxdctl |= rxq->pthresh & 0x3F;
1634 rxdctl |= (rxq->hthresh & 0x3F) << 8;
1635 rxdctl |= (rxq->wthresh & 0x3F) << 16;
1636 rxdctl |= E1000_RXDCTL_GRAN;
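/*
 * Worked example (assuming the preserved top bits read back as 0):
 * pthresh = 8, hthresh = 8, wthresh = 4 gives
 * rxdctl = 0x08 | (0x08 << 8) | (0x04 << 16) | E1000_RXDCTL_GRAN
 *        = 0x01040808.
 */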
1637 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
1640 * Since EM devices do not have any sort of hardware
1641 * limit on packet length, jumbo frames of any size
1642 * can be accepted; thus we have to enable scattered
1643 * RX if jumbo frames are enabled (or if the buffer size
1644 * is too small to accommodate non-jumbo packets)
1645 * to avoid splitting packets that don't fit into
1648 if (dev->data->dev_conf.rxmode.jumbo_frame ||
1649 rctl_bsize < ETHER_MAX_LEN) {
1651 (eth_rx_burst_t)eth_em_recv_scattered_pkts;
1652 dev->data->scattered_rx = 1;
1657 * Setup the Checksum Register.
1658 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1660 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
1662 if (dev->data->dev_conf.rxmode.hw_ip_checksum)
1663 rxcsum |= E1000_RXCSUM_IPOFL;
1665 rxcsum &= ~E1000_RXCSUM_IPOFL;
1666 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
1668 /* No MRQ or RSS support for now */
1670 /* Set early receive threshold on appropriate hw */
1671 if ((hw->mac.type == e1000_ich9lan ||
1672 hw->mac.type == e1000_pch2lan ||
1673 hw->mac.type == e1000_ich10lan) &&
1674 dev->data->dev_conf.rxmode.jumbo_frame == 1) {
1675 u32 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(0));
1676 E1000_WRITE_REG(hw, E1000_RXDCTL(0), rxdctl | 3);
1677 E1000_WRITE_REG(hw, E1000_ERT, 0x100 | (1 << 13));
1680 if (hw->mac.type == e1000_pch2lan) {
1681 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1682 e1000_lv_jumbo_workaround_ich8lan(hw, TRUE);
1684 e1000_lv_jumbo_workaround_ich8lan(hw, FALSE);
1687 /* Setup the Receive Control Register. */
1688 if (dev->data->dev_conf.rxmode.hw_strip_crc)
1689 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
1691 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1693 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1694 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1695 E1000_RCTL_RDMTS_HALF |
1696 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1698 /* Make sure VLAN Filters are off. */
1699 rctl &= ~E1000_RCTL_VFE;
1700 /* Don't store bad packets. */
1701 rctl &= ~E1000_RCTL_SBP;
1702 /* Legacy descriptor type. */
1703 rctl &= ~E1000_RCTL_DTYP_MASK;
1706 * Configure support of jumbo frames, if any.
1708 if (dev->data->dev_conf.rxmode.jumbo_frame == 1)
1709 rctl |= E1000_RCTL_LPE;
1711 rctl &= ~E1000_RCTL_LPE;
1713 /* Enable Receives. */
1714 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
1719 /*********************************************************************
1721 * Enable transmit unit.
1723 **********************************************************************/
1725 eth_em_tx_init(struct rte_eth_dev *dev)
1727 struct e1000_hw *hw;
1728 struct em_tx_queue *txq;
1733 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1735 /* Setup the Base and Length of the Tx Descriptor Rings. */
1736 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1739 txq = dev->data->tx_queues[i];
1740 bus_addr = txq->tx_ring_phys_addr;
1741 E1000_WRITE_REG(hw, E1000_TDLEN(i),
1743 sizeof(*txq->tx_ring));
1744 E1000_WRITE_REG(hw, E1000_TDBAH(i),
1745 (uint32_t)(bus_addr >> 32));
1746 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
1748 /* Setup the HW Tx Head and Tail descriptor pointers. */
1749 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
1750 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
1752 /* Setup Transmit threshold registers. */
1753 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
1755 * Bit 22 is reserved; on some models it should always be 0,
1756 * on others always 1.
1758 txdctl &= E1000_TXDCTL_COUNT_DESC;
1759 txdctl |= txq->pthresh & 0x3F;
1760 txdctl |= (txq->hthresh & 0x3F) << 8;
1761 txdctl |= (txq->wthresh & 0x3F) << 16;
1762 txdctl |= E1000_TXDCTL_GRAN;
1763 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
1766 /* Program the Transmit Control Register. */
1767 tctl = E1000_READ_REG(hw, E1000_TCTL);
1768 tctl &= ~E1000_TCTL_CT;
1769 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
1770 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
1772 /* This write will effectively turn on the transmit unit. */
1773 E1000_WRITE_REG(hw, E1000_TCTL, tctl);