/*-
 * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_prefetch.h>
#include <rte_string_fns.h>

#include "e1000_logs.h"
#include "igb/e1000_api.h"
#include "e1000_ethdev.h"
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
    struct rte_mbuf *m;

    m = __rte_mbuf_raw_alloc(mp);
    __rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
    return (m);
}
#define RTE_MBUF_DATA_DMA_ADDR(mb) \
    (uint64_t) ((mb)->buf_physaddr + \
    (uint64_t) ((char *)((mb)->pkt.data) - \
    (char *)(mb)->buf_addr))

#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
    (uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
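/*
 * Illustrative sketch, not part of the original driver: the relation
 * between the two macros above. For an mbuf whose pkt.data still points
 * at buf_addr + RTE_PKTMBUF_HEADROOM (as the RX path sets it), both
 * expressions yield the same bus address; they diverge once headers are
 * prepended to or stripped from the buffer.
 */
static inline int
igb_dma_addr_example(struct rte_mbuf *m)
{
    uint64_t cur = RTE_MBUF_DATA_DMA_ADDR(m);         /* follows pkt.data */
    uint64_t def = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(m); /* headroom only */

    return cur == def;
}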
/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct igb_rx_entry {
    struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct igb_tx_entry {
    struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
    uint16_t next_id;      /**< Index of next descriptor in ring. */
    uint16_t last_id;      /**< Index of last scattered descriptor. */
};
/**
 * Structure associated with each RX queue.
 */
struct igb_rx_queue {
    struct rte_mempool *mb_pool;    /**< mbuf pool to populate RX ring. */
    volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
    uint64_t rx_ring_phys_addr;     /**< RX ring DMA address. */
    volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
    struct igb_rx_entry *sw_ring;   /**< address of RX software ring. */
    struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
    struct rte_mbuf *pkt_last_seg;  /**< Last segment of current packet. */
    uint16_t nb_rx_desc;            /**< number of RX descriptors. */
    uint16_t rx_tail;               /**< current value of RDT register. */
    uint16_t nb_rx_hold;            /**< number of held free RX desc. */
    uint16_t rx_free_thresh;        /**< max free RX desc to hold. */
    uint16_t queue_id;              /**< RX queue index. */
    uint8_t port_id;                /**< Device port identifier. */
    uint8_t pthresh;                /**< Prefetch threshold register. */
    uint8_t hthresh;                /**< Host threshold register. */
    uint8_t wthresh;                /**< Write-back threshold register. */
    uint8_t crc_len;                /**< 0 if CRC stripped, 4 otherwise. */
};
/**
 * Hardware context number.
 */
enum igb_advctx_num {
    IGB_CTX_0   = 0, /**< CTX0 */
    IGB_CTX_1   = 1, /**< CTX1 */
    IGB_CTX_NUM = 2, /**< CTX NUM */
};
/**
 * Structure to check if a new context needs to be built.
 */
struct igb_advctx_info {
    uint16_t flags;           /**< ol_flags related to context build. */
    uint32_t cmp_mask;        /**< compare mask for vlan_macip_lens. */
    uint32_t vlan_macip_lens; /**< vlan, mac & ip length. */
};
/**
 * Structure associated with each TX queue.
 */
struct igb_tx_queue {
    volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address. */
    uint64_t tx_ring_phys_addr;      /**< TX ring DMA address. */
    struct igb_tx_entry *sw_ring;    /**< virtual address of SW ring. */
    volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
    uint32_t txd_type;               /**< Device-specific TXD type. */
    uint16_t nb_tx_desc;             /**< number of TX descriptors. */
    uint16_t tx_tail;  /**< Current value of TDT register. */
    uint16_t tx_head;  /**< Index of first used TX descriptor. */
    uint16_t queue_id; /**< TX queue index. */
    uint8_t port_id;   /**< Device port identifier. */
    uint8_t pthresh;   /**< Prefetch threshold register. */
    uint8_t hthresh;   /**< Host threshold register. */
    uint8_t wthresh;   /**< Write-back threshold register. */
    uint32_t ctx_curr; /**< Currently used hardware context. */
    uint32_t ctx_start; /**< Start context position for transmit queue. */
    struct igb_advctx_info ctx_cache[IGB_CTX_NUM]; /**< Hardware context history. */
};
#define RTE_PMD_USE_PREFETCH

#ifdef RTE_PMD_USE_PREFETCH
#define rte_igb_prefetch(p)    rte_prefetch0(p)
#else
#define rte_igb_prefetch(p)    do {} while (0)
#endif

#ifdef RTE_PMD_PACKET_PREFETCH
#define rte_packet_prefetch(p) rte_prefetch1(p)
#else
#define rte_packet_prefetch(p) do {} while (0)
#endif
/*********************************************************************
 *
 *  TX function
 *
 **********************************************************************/
/*
 * Advanced context descriptors are almost identical between igb and ixgbe.
 * This is kept as a separate function for now, as an optimization
 * opportunity; rework is required to use pre-defined values.
 */
static inline void
igbe_set_xmit_ctx(struct igb_tx_queue* txq,
    volatile struct e1000_adv_tx_context_desc *ctx_txd,
    uint16_t ol_flags, uint32_t vlan_macip_lens)
{
    uint32_t type_tucmd_mlhl;
    uint32_t mss_l4len_idx;
    uint32_t ctx_idx, ctx_curr;
    uint32_t cmp_mask;

    ctx_curr = txq->ctx_curr;
    ctx_idx = ctx_curr + txq->ctx_start;

    cmp_mask = 0;
    type_tucmd_mlhl = 0;

    if (ol_flags & PKT_TX_VLAN_PKT) {
        cmp_mask |= TX_VLAN_CMP_MASK;
    }

    if (ol_flags & PKT_TX_IP_CKSUM) {
        type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
        cmp_mask |= TX_MAC_LEN_CMP_MASK;
    }
    /* Specify which HW CTX to upload. */
    mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
    switch (ol_flags & PKT_TX_L4_MASK) {
    case PKT_TX_UDP_CKSUM:
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
            E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    case PKT_TX_TCP_CKSUM:
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
            E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    case PKT_TX_SCTP_CKSUM:
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
            E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
        mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
        cmp_mask |= TX_MACIP_LEN_CMP_MASK;
        break;
    default:
        type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
            E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
        break;
    }
    txq->ctx_cache[ctx_curr].flags = ol_flags;
    txq->ctx_cache[ctx_curr].cmp_mask = cmp_mask;
    txq->ctx_cache[ctx_curr].vlan_macip_lens = vlan_macip_lens & cmp_mask;

    ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
    ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
    ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
    ctx_txd->seqnum_seed = 0;
}
/*
 * Check which hardware context can be used. Use the existing match
 * or create a new context descriptor.
 */
static inline uint32_t
what_advctx_update(struct igb_tx_queue *txq, uint16_t flags,
    uint32_t vlan_macip_lens)
{
    /* If match with the current context */
    if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
        (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
        (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
        return txq->ctx_curr;
    }

    /* If match with the second context */
    txq->ctx_curr ^= 1;
    if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
        (txq->ctx_cache[txq->ctx_curr].vlan_macip_lens ==
        (txq->ctx_cache[txq->ctx_curr].cmp_mask & vlan_macip_lens)))) {
        return txq->ctx_curr;
    }

    /* Mismatch: a new context descriptor is required. */
    return (IGB_CTX_NUM);
}
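/*
 * Illustrative sketch, not part of the original source: how the transmit
 * path consumes the result of what_advctx_update(). IGB_CTX_0/IGB_CTX_1
 * mean the cached hardware context can be reused as-is; IGB_CTX_NUM means
 * a new context descriptor must be written with igbe_set_xmit_ctx().
 */
static inline int
igb_ctx_cache_example(struct igb_tx_queue *txq, uint16_t ol_flags,
    uint32_t vlan_macip_lens)
{
    uint32_t ctx = what_advctx_update(txq, ol_flags, vlan_macip_lens);

    return (ctx == IGB_CTX_NUM); /* non-zero: build a new context */
}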
static inline uint32_t
tx_desc_cksum_flags_to_olinfo(uint16_t ol_flags)
{
    static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
    static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
    uint32_t tmp;

    tmp  = l4_olinfo[(ol_flags & PKT_TX_L4_MASK)  != PKT_TX_L4_NO_CKSUM];
    tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
    return tmp;
}
static inline uint32_t
tx_desc_vlan_flags_to_cmdtype(uint16_t ol_flags)
{
    static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
    return vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
}
uint16_t
eth_igb_xmit_pkts(struct igb_tx_queue *txq, struct rte_mbuf **tx_pkts,
    uint16_t nb_pkts)
{
    struct igb_tx_entry *sw_ring;
    struct igb_tx_entry *txe, *txn;
    volatile union e1000_adv_tx_desc *txr;
    volatile union e1000_adv_tx_desc *txd;
    struct rte_mbuf *tx_pkt;
    struct rte_mbuf *m_seg;
    uint64_t buf_dma_addr;
    uint32_t olinfo_status;
    uint32_t cmd_type_len;
    uint32_t pkt_len;
    uint16_t slen;
    uint16_t ol_flags;
    uint16_t tx_id;
    uint16_t tx_last;
    uint16_t tx_end;
    uint16_t nb_tx;
    uint16_t tx_ol_req;
    uint32_t new_ctx = 0;
    uint32_t ctx = 0;
    uint32_t vlan_macip_lens;

    sw_ring = txq->sw_ring;
    txr = txq->tx_ring;
    tx_id = txq->tx_tail;
    txe = &sw_ring[tx_id];
    for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
        tx_pkt = *tx_pkts++;
        pkt_len = tx_pkt->pkt.pkt_len;

        RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);

        /*
         * The number of descriptors that must be allocated for a
         * packet is the number of segments of that packet, plus 1
         * Context Descriptor for the VLAN Tag Identifier, if any.
         * Determine the last TX descriptor to allocate in the TX ring
         * for the packet, starting from the current position (tx_id)
         * in the ring.
         */
        tx_last = (uint16_t) (tx_id + tx_pkt->pkt.nb_segs - 1);

        ol_flags = tx_pkt->ol_flags;
        vlan_macip_lens = (tx_pkt->pkt.vlan_tci << 16) |
            (tx_pkt->pkt.l2_len << E1000_ADVTXD_MACLEN_SHIFT) |
            tx_pkt->pkt.l3_len;
        tx_ol_req = (ol_flags & PKT_TX_OFFLOAD_MASK);

        /* If a Context Descriptor needs to be built. */
        if (tx_ol_req) {
            ctx = what_advctx_update(txq, tx_ol_req, vlan_macip_lens);
            /* Only allocate a context descriptor if required. */
            new_ctx = (ctx == IGB_CTX_NUM);
        }
        tx_last = (uint16_t) (tx_last + new_ctx);
        if (tx_last >= txq->nb_tx_desc)
            tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);

        PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
            " tx_first=%u tx_last=%u\n",
            (unsigned) txq->port_id,
            (unsigned) txq->queue_id,
            (unsigned) pkt_len,
            (unsigned) tx_id,
            (unsigned) tx_last);
        /*
         * Check if there are enough free descriptors in the TX ring
         * to transmit the next packet.
         * This operation is based on the two following rules:
         *
         *   1- Only check that the last needed TX descriptor can be
         *      allocated (by construction, if that descriptor is free,
         *      all intermediate ones are also free).
         *
         *      For this purpose, the index of the last TX descriptor
         *      used for a packet (the "last descriptor" of a packet)
         *      is recorded in the TX entries (the last one included)
         *      that are associated with all TX descriptors allocated
         *      for that packet.
         *
         *   2- Avoid allocating the last free TX descriptor of the
         *      ring, in order to never set the TDT register with the
         *      same value stored in parallel by the NIC in the TDH
         *      register, which makes the TX engine of the NIC enter
         *      in a deadlock situation.
         *
         *      By extension, avoid allocating a free descriptor that
         *      belongs to the last set of free descriptors allocated
         *      to the same packet previously transmitted.
         */

        /*
         * The "last descriptor" of the previously sent packet, if any,
         * which used the last descriptor to allocate.
         */
        tx_end = sw_ring[tx_last].last_id;

        /*
         * The next descriptor following that "last descriptor" in the
         * ring.
         */
        tx_end = sw_ring[tx_end].next_id;

        /*
         * The "last descriptor" associated with that next descriptor.
         */
        tx_end = sw_ring[tx_end].last_id;

        /*
         * Check that this descriptor is free.
         */
        if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
            if (nb_tx == 0)
                return (0);
            goto end_of_tx;
        }
        /*
         * Set common flags of all TX Data Descriptors.
         *
         * The following bits must be set in all Data Descriptors:
         *   - E1000_ADVTXD_DTYP_DATA
         *   - E1000_ADVTXD_DCMD_DEXT
         *
         * The following bits must be set in the first Data Descriptor
         * and are ignored in the other ones:
         *   - E1000_ADVTXD_DCMD_IFCS
         *   - E1000_ADVTXD_MAC_1588
         *   - E1000_ADVTXD_DCMD_VLE
         *
         * The following bits must only be set in the last Data
         * Descriptor:
         *   - E1000_TXD_CMD_EOP
         *
         * The following bits can be set in any Data Descriptor, but
         * are only set in the last Data Descriptor:
         *   - E1000_TXD_CMD_RS
         */
        cmd_type_len = txq->txd_type |
            E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
        olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
        if (ol_flags & PKT_TX_IEEE1588_TMST)
            cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
        if (tx_ol_req) {
            /* Setup TX Advanced context descriptor if required. */
            if (new_ctx) {
                volatile struct e1000_adv_tx_context_desc *
                    ctx_txd;

                ctx_txd = (volatile struct
                    e1000_adv_tx_context_desc *)
                    &txr[tx_id];

                txn = &sw_ring[txe->next_id];
                RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);

                if (txe->mbuf != NULL) {
                    rte_pktmbuf_free_seg(txe->mbuf);
                    txe->mbuf = NULL;
                }

                igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
                    vlan_macip_lens);

                txe->last_id = tx_last;
                tx_id = txe->next_id;
                txe = txn;
            }
            /* Setup the TX Advanced Data Descriptor. */
            cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(ol_flags);
            olinfo_status |= tx_desc_cksum_flags_to_olinfo(ol_flags);
            olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
        }

        m_seg = tx_pkt;
        do {
            txd = &txr[tx_id];
            txn = &sw_ring[txe->next_id];

            if (txe->mbuf != NULL)
                rte_pktmbuf_free_seg(txe->mbuf);
            txe->mbuf = m_seg;

            /*
             * Set up transmit descriptor.
             */
            slen = (uint16_t) m_seg->pkt.data_len;
            buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
            txd->read.buffer_addr =
                rte_cpu_to_le_64(buf_dma_addr);
            txd->read.cmd_type_len =
                rte_cpu_to_le_32(cmd_type_len | slen);
            txd->read.olinfo_status =
                rte_cpu_to_le_32(olinfo_status);
            txe->last_id = tx_last;
            tx_id = txe->next_id;
            txe = txn;
            m_seg = m_seg->pkt.next;
        } while (m_seg != NULL);
        /*
         * The last packet data descriptor needs End Of Packet (EOP)
         * and Report Status (RS).
         */
        txd->read.cmd_type_len |=
            rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
    }
end_of_tx:
    rte_wmb();

    /*
     * Set the Transmit Descriptor Tail (TDT).
     */
    E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
    PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
        (unsigned) txq->port_id, (unsigned) txq->queue_id,
        (unsigned) tx_id, (unsigned) nb_tx);
    txq->tx_tail = tx_id;

    return (nb_tx);
}
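/*
 * Illustrative usage sketch, not part of the driver: once this function is
 * installed as dev->tx_pkt_burst, applications reach it through the generic
 * burst API. MAX_PKT_BURST is a hypothetical application-side define.
 *
 *     struct rte_mbuf *pkts[MAX_PKT_BURST];
 *     uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 *     // mbufs not accepted (nb_sent < nb_pkts) remain owned by the caller
 */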
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/
static inline uint16_t
rx_desc_hlen_type_rss_to_pkt_flags(uint32_t hl_tp_rs)
{
    uint16_t pkt_flags;

    static uint16_t ip_pkt_types_map[16] = {
        0, PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT, PKT_RX_IPV4_HDR_EXT,
        PKT_RX_IPV6_HDR, 0, 0, 0,
        PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
        PKT_RX_IPV6_HDR_EXT, 0, 0, 0,
    };

#if defined(RTE_LIBRTE_IEEE1588)
    static uint32_t ip_pkt_etqf_map[8] = {
        0, 0, 0, PKT_RX_IEEE1588_PTP,
        0, 0, 0, 0,
    };

    pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ?
        ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07] :
        ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#else
    pkt_flags = (uint16_t) (hl_tp_rs & E1000_RXDADV_PKTTYPE_ETQF) ? 0 :
        ip_pkt_types_map[(hl_tp_rs >> 4) & 0x0F];
#endif
    return pkt_flags | (uint16_t) (((hl_tp_rs & 0x0F) == 0) ? 0 :
        PKT_RX_RSS_HASH);
}
static inline uint16_t
rx_desc_status_to_pkt_flags(uint32_t rx_status)
{
    uint16_t pkt_flags;

    /* Check if VLAN present. */
    pkt_flags = (uint16_t) (rx_status & E1000_RXD_STAT_VP) ?
        PKT_RX_VLAN_PKT : 0;

#if defined(RTE_LIBRTE_IEEE1588)
    if (rx_status & E1000_RXD_STAT_TMST)
        pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
#endif
    return pkt_flags;
}
static inline uint16_t
rx_desc_error_to_pkt_flags(uint32_t rx_status)
{
    /*
     * Bit 30: IPE, IPv4 checksum error
     * Bit 29: L4I, L4 integrity error
     */
    static uint16_t error_to_pkt_flags_map[4] = {
        0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
        PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
    };
    return error_to_pkt_flags_map[(rx_status >>
        E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
}
uint16_t
eth_igb_recv_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
    uint16_t nb_pkts)
{
    volatile union e1000_adv_rx_desc *rx_ring;
    volatile union e1000_adv_rx_desc *rxdp;
    struct igb_rx_entry *sw_ring;
    struct igb_rx_entry *rxe;
    struct rte_mbuf *rxm;
    struct rte_mbuf *nmb;
    union e1000_adv_rx_desc rxd;
    uint64_t dma_addr;
    uint32_t staterr;
    uint32_t hlen_type_rss;
    uint16_t pkt_len;
    uint16_t rx_id;
    uint16_t nb_rx;
    uint16_t nb_hold;
    uint16_t pkt_flags;

    nb_rx = 0;
    nb_hold = 0;
    rx_id = rxq->rx_tail;
    rx_ring = rxq->rx_ring;
    sw_ring = rxq->sw_ring;
    while (nb_rx < nb_pkts) {
        /*
         * The order of operations here is important as the DD status
         * bit must not be read after any other descriptor fields.
         * rx_ring and rxdp are pointing to volatile data so the order
         * of accesses cannot be reordered by the compiler. If they were
         * not volatile, they could be reordered which could lead to
         * using invalid descriptor fields when read from rxd.
         */
        rxdp = &rx_ring[rx_id];
        staterr = rxdp->wb.upper.status_error;
        if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
            break;
        rxd = *rxdp;
        /*
         * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
         * likely to be invalid and to be dropped by the various
         * validation checks performed by the network stack.
         *
         * Allocate a new mbuf to replenish the RX ring descriptor.
         * If the allocation fails:
         *    - arrange for that RX descriptor to be the first one
         *      being parsed the next time the receive function is
         *      invoked [on the same queue].
         *
         *    - Stop parsing the RX ring and return immediately.
         *
         * This policy does not drop the packet received in the RX
         * descriptor for which the allocation of a new mbuf failed.
         * Thus, it allows that packet to be later retrieved if
         * mbufs have been freed in the meantime.
         * As a side effect, holding RX descriptors instead of
         * systematically giving them back to the NIC may lead to
         * RX ring exhaustion situations.
         * However, the NIC can gracefully prevent such situations
         * from happening by sending specific "back-pressure" flow
         * control frames to its peer(s).
         */
        PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
            "staterr=0x%x pkt_len=%u\n",
            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
            (unsigned) rx_id, (unsigned) staterr,
            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

        nmb = rte_rxmbuf_alloc(rxq->mb_pool);
        if (nmb == NULL) {
            PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                "queue_id=%u\n", (unsigned) rxq->port_id,
                (unsigned) rxq->queue_id);
            rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
            break;
        }
        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (rx_id == rxq->nb_rx_desc)
            rx_id = 0;

        /* Prefetch next mbuf while processing current one. */
        rte_igb_prefetch(sw_ring[rx_id].mbuf);

        /*
         * When next RX descriptor is on a cache-line boundary,
         * prefetch the next 4 RX descriptors and the next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_igb_prefetch(&rx_ring[rx_id]);
            rte_igb_prefetch(&sw_ring[rx_id]);
        }

        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma_addr =
            rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
        rxdp->read.hdr_addr = dma_addr;
        rxdp->read.pkt_addr = dma_addr;
        /*
         * Initialize the returned mbuf.
         * 1) setup generic mbuf fields:
         *    - number of segments,
         *    - next segment,
         *    - packet length,
         *    - RX port identifier.
         * 2) integrate hardware offload data, if any:
         *    - RSS flag & hash,
         *    - IP checksum flag,
         *    - VLAN TCI, if any,
         *    - error flags.
         */
        pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
            rxq->crc_len);
        rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
        rte_packet_prefetch(rxm->pkt.data);
        rxm->pkt.nb_segs = 1;
        rxm->pkt.next = NULL;
        rxm->pkt.pkt_len = pkt_len;
        rxm->pkt.data_len = pkt_len;
        rxm->pkt.in_port = rxq->port_id;

        rxm->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;
        hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
        /* Only valid if PKT_RX_VLAN_PKT is set in pkt_flags. */
        rxm->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);

        pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
        pkt_flags = (pkt_flags |
            rx_desc_status_to_pkt_flags(staterr));
        pkt_flags = (pkt_flags |
            rx_desc_error_to_pkt_flags(staterr));
        rxm->ol_flags = pkt_flags;

        /*
         * Store the mbuf address into the next entry of the array
         * of returned packets.
         */
        rx_pkts[nb_rx++] = rxm;
    }
    rxq->rx_tail = rx_id;

    /*
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the Receive Descriptor Tail (RDT)
     * register.
     * Update the RDT with the value of the last processed RX descriptor
     * minus 1, to guarantee that the RDT register is never equal to the
     * RDH register, which creates a "full" ring situation from the
     * hardware point of view...
     */
    nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
            "nb_hold=%u nb_rx=%u\n",
            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
            (unsigned) rx_id, (unsigned) nb_hold,
            (unsigned) nb_rx);
        rx_id = (uint16_t) ((rx_id == 0) ?
            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;
    return (nb_rx);
}
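/*
 * Illustrative usage sketch, not part of the driver: the receive loop an
 * application runs on top of this function through the generic burst API.
 * MAX_PKT_BURST and handle_packet() are hypothetical application-side names.
 *
 *     struct rte_mbuf *pkts[MAX_PKT_BURST];
 *     uint16_t nb_rx = rte_eth_rx_burst(port_id, queue_id,
 *                                       pkts, MAX_PKT_BURST);
 *     for (i = 0; i < nb_rx; i++)
 *         handle_packet(pkts[i]);
 */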
uint16_t
eth_igb_recv_scattered_pkts(struct igb_rx_queue *rxq, struct rte_mbuf **rx_pkts,
    uint16_t nb_pkts)
{
    volatile union e1000_adv_rx_desc *rx_ring;
    volatile union e1000_adv_rx_desc *rxdp;
    struct igb_rx_entry *sw_ring;
    struct igb_rx_entry *rxe;
    struct rte_mbuf *first_seg;
    struct rte_mbuf *last_seg;
    struct rte_mbuf *rxm;
    struct rte_mbuf *nmb;
    union e1000_adv_rx_desc rxd;
    uint64_t dma; /* Physical address of mbuf data buffer. */
    uint32_t staterr;
    uint32_t hlen_type_rss;
    uint16_t rx_id;
    uint16_t nb_rx;
    uint16_t nb_hold;
    uint16_t data_len;
    uint16_t pkt_flags;

    nb_rx = 0;
    nb_hold = 0;
    rx_id = rxq->rx_tail;
    rx_ring = rxq->rx_ring;
    sw_ring = rxq->sw_ring;

    /*
     * Retrieve RX context of current packet, if any.
     */
    first_seg = rxq->pkt_first_seg;
    last_seg = rxq->pkt_last_seg;
    while (nb_rx < nb_pkts) {
    next_desc:
        /*
         * The order of operations here is important as the DD status
         * bit must not be read after any other descriptor fields.
         * rx_ring and rxdp are pointing to volatile data so the order
         * of accesses cannot be reordered by the compiler. If they were
         * not volatile, they could be reordered which could lead to
         * using invalid descriptor fields when read from rxd.
         */
        rxdp = &rx_ring[rx_id];
        staterr = rxdp->wb.upper.status_error;
        if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
            break;
        rxd = *rxdp;
        /*
         * Allocate a new mbuf to replenish the RX ring descriptor.
         * If the allocation fails:
         *    - arrange for that RX descriptor to be the first one
         *      being parsed the next time the receive function is
         *      invoked [on the same queue].
         *
         *    - Stop parsing the RX ring and return immediately.
         *
         * This policy does not drop the packet received in the RX
         * descriptor for which the allocation of a new mbuf failed.
         * Thus, it allows that packet to be later retrieved if
         * mbufs have been freed in the meantime.
         * As a side effect, holding RX descriptors instead of
         * systematically giving them back to the NIC may lead to
         * RX ring exhaustion situations.
         * However, the NIC can gracefully prevent such situations
         * from happening by sending specific "back-pressure" flow
         * control frames to its peer(s).
         */
        PMD_RX_LOG(DEBUG, "\nport_id=%u queue_id=%u rx_id=%u "
            "staterr=0x%x data_len=%u\n",
            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
            (unsigned) rx_id, (unsigned) staterr,
            (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));

        nmb = rte_rxmbuf_alloc(rxq->mb_pool);
        if (nmb == NULL) {
            PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                "queue_id=%u\n", (unsigned) rxq->port_id,
                (unsigned) rxq->queue_id);
            rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
            break;
        }
        nb_hold++;
        rxe = &sw_ring[rx_id];
        rx_id++;
        if (rx_id == rxq->nb_rx_desc)
            rx_id = 0;

        /* Prefetch next mbuf while processing current one. */
        rte_igb_prefetch(sw_ring[rx_id].mbuf);

        /*
         * When next RX descriptor is on a cache-line boundary,
         * prefetch the next 4 RX descriptors and the next 8 pointers
         * to mbufs.
         */
        if ((rx_id & 0x3) == 0) {
            rte_igb_prefetch(&rx_ring[rx_id]);
            rte_igb_prefetch(&sw_ring[rx_id]);
        }

        /*
         * Update RX descriptor with the physical address of the new
         * data buffer of the new allocated mbuf.
         */
        rxm = rxe->mbuf;
        rxe->mbuf = nmb;
        dma = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(nmb));
        rxdp->read.pkt_addr = dma;
        rxdp->read.hdr_addr = dma;
        /*
         * Set data length & data buffer address of mbuf.
         */
        data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
        rxm->pkt.data_len = data_len;
        rxm->pkt.data = (char*) rxm->buf_addr + RTE_PKTMBUF_HEADROOM;

        /*
         * If this is the first buffer of the received packet,
         * set the pointer to the first mbuf of the packet and
         * initialize its context.
         * Otherwise, update the total length and the number of segments
         * of the current scattered packet, and update the pointer to
         * the last mbuf of the current packet.
         */
        if (first_seg == NULL) {
            first_seg = rxm;
            first_seg->pkt.pkt_len = data_len;
            first_seg->pkt.nb_segs = 1;
        } else {
            first_seg->pkt.pkt_len += data_len;
            first_seg->pkt.nb_segs++;
            last_seg->pkt.next = rxm;
        }
926 if (! (staterr & E1000_RXD_STAT_EOP)) {
932 * This is the last buffer of the received packet.
933 * If the CRC is not stripped by the hardware:
934 * - Subtract the CRC length from the total packet length.
935 * - If the last buffer only contains the whole CRC or a part
936 * of it, free the mbuf associated to the last buffer.
937 * If part of the CRC is also contained in the previous
938 * mbuf, subtract the length of that CRC part from the
939 * data length of the previous mbuf.
941 rxm->pkt.next = NULL;
942 if (unlikely(rxq->crc_len > 0)) {
943 first_seg->pkt.pkt_len -= ETHER_CRC_LEN;
944 if (data_len <= ETHER_CRC_LEN) {
945 rte_pktmbuf_free_seg(rxm);
946 first_seg->pkt.nb_segs--;
947 last_seg->pkt.data_len = (uint16_t)
948 (last_seg->pkt.data_len -
949 (ETHER_CRC_LEN - data_len));
950 last_seg->pkt.next = NULL;
953 (uint16_t) (data_len - ETHER_CRC_LEN);
        /*
         * Initialize the first mbuf of the returned packet:
         *   - RX port identifier,
         *   - hardware offload data, if any:
         *     - RSS flag & hash,
         *     - IP checksum flag,
         *     - VLAN TCI, if any,
         *     - error flags.
         */
        first_seg->pkt.in_port = rxq->port_id;
        first_seg->pkt.hash.rss = rxd.wb.lower.hi_dword.rss;

        /*
         * The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
         * set in the pkt_flags field.
         */
        first_seg->pkt.vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
        hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
        pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
        pkt_flags = (pkt_flags | rx_desc_status_to_pkt_flags(staterr));
        pkt_flags = (pkt_flags | rx_desc_error_to_pkt_flags(staterr));
        first_seg->ol_flags = pkt_flags;

        /* Prefetch data of first segment, if configured to do so. */
        rte_packet_prefetch(first_seg->pkt.data);

        /*
         * Store the mbuf address into the next entry of the array
         * of returned packets.
         */
        rx_pkts[nb_rx++] = first_seg;
        /*
         * Setup receipt context for a new packet.
         */
        first_seg = NULL;
    }

    /*
     * Record index of the next RX descriptor to probe.
     */
    rxq->rx_tail = rx_id;

    /*
     * Save receive context.
     */
    rxq->pkt_first_seg = first_seg;
    rxq->pkt_last_seg = last_seg;
    /*
     * If the number of free RX descriptors is greater than the RX free
     * threshold of the queue, advance the Receive Descriptor Tail (RDT)
     * register.
     * Update the RDT with the value of the last processed RX descriptor
     * minus 1, to guarantee that the RDT register is never equal to the
     * RDH register, which creates a "full" ring situation from the
     * hardware point of view...
     */
    nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
    if (nb_hold > rxq->rx_free_thresh) {
        PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
            "nb_hold=%u nb_rx=%u\n",
            (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
            (unsigned) rx_id, (unsigned) nb_hold,
            (unsigned) nb_rx);
        rx_id = (uint16_t) ((rx_id == 0) ?
            (rxq->nb_rx_desc - 1) : (rx_id - 1));
        E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
        nb_hold = 0;
    }
    rxq->nb_rx_hold = nb_hold;
    return (nb_rx);
}
/*
 * Rings setup and release.
 *
 * TDBA/RDBA should be aligned on 16 byte boundary. But TDLEN/RDLEN should be
 * multiple of 128 bytes. So we align TDBA/RDBA on 128 byte boundary.
 * This will also optimize cache line size effect.
 * H/W supports up to cache line size 128.
 */
#define IGB_ALIGN 128

/*
 * Maximum number of Ring Descriptors.
 *
 * Since RDLEN/TDLEN should be multiple of 128 bytes, the number of ring
 * descriptors should meet the following condition:
 *     (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
 */
#define IGB_MIN_RING_DESC 32
#define IGB_MAX_RING_DESC 4096
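/*
 * Worked example of the rule above (assumption: the advanced descriptors
 * are 16 bytes, i.e. sizeof(union e1000_adv_rx_desc) == 16): RDLEN/TDLEN
 * must be a multiple of 128 bytes, so nb_desc must be a multiple of
 * 128 / 16 = 8; e.g. 512 descriptors give 512 * 16 = 8192 = 64 * 128 bytes.
 * A compile-time restatement using the negative-array-size idiom:
 */
typedef char igb_rx_desc_align_check[
    (128 % sizeof(union e1000_adv_rx_desc)) == 0 ? 1 : -1];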
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
    uint16_t queue_id, uint32_t ring_size, int socket_id)
{
    char z_name[RTE_MEMZONE_NAMESIZE];
    const struct rte_memzone *mz;

    rte_snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
        dev->driver->pci_drv.name, ring_name,
        dev->data->port_id, queue_id);
    mz = rte_memzone_lookup(z_name);
    if (mz)
        return mz;

    return rte_memzone_reserve_aligned(z_name, (uint64_t)ring_size,
        socket_id, 0, IGB_ALIGN);
}
static void
igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
{
    unsigned i;

    if (txq->sw_ring != NULL) {
        for (i = 0; i < txq->nb_tx_desc; i++) {
            if (txq->sw_ring[i].mbuf != NULL) {
                rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
                txq->sw_ring[i].mbuf = NULL;
            }
        }
    }
}

static void
igb_tx_queue_release(struct igb_tx_queue *txq)
{
    igb_tx_queue_release_mbufs(txq);
    rte_free(txq->sw_ring);
    rte_free(txq);
}
static int
igb_dev_tx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
{
    uint16_t i, old_nb_queues = dev->data->nb_tx_queues;
    struct igb_tx_queue **txq;

    if (dev->data->tx_queues == NULL) {
        dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
            sizeof(struct igb_tx_queue *) * nb_queues,
            CACHE_LINE_SIZE);
        if (dev->data->tx_queues == NULL) {
            dev->data->nb_tx_queues = 0;
            return -(ENOMEM);
        }
    } else {
        if (nb_queues < old_nb_queues)
            for (i = nb_queues; i < old_nb_queues; i++)
                igb_tx_queue_release(dev->data->tx_queues[i]);

        if (nb_queues != old_nb_queues) {
            txq = rte_realloc(dev->data->tx_queues,
                sizeof(struct igb_tx_queue *) * nb_queues,
                CACHE_LINE_SIZE);
            if (txq == NULL)
                return -(ENOMEM);

            dev->data->tx_queues = txq;
            if (nb_queues > old_nb_queues)
                memset(&(txq[old_nb_queues]), 0,
                    sizeof(struct igb_tx_queue *) *
                    (nb_queues - old_nb_queues));
        }
    }
    dev->data->nb_tx_queues = nb_queues;

    return (0);
}
static void
igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
{
    txq->tx_head = 0;
    txq->tx_tail = 0;
    txq->ctx_curr = 0;
    memset((void*)&txq->ctx_cache, 0,
        IGB_CTX_NUM * sizeof(struct igb_advctx_info));
}
static void
igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
{
    struct igb_tx_entry *txe = txq->sw_ring;
    uint32_t size;
    uint16_t i, prev;
    struct e1000_hw *hw;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    size = sizeof(union e1000_adv_tx_desc) * txq->nb_tx_desc;
    /* Zero out HW ring memory. */
    for (i = 0; i < size; i++) {
        ((volatile char *)txq->tx_ring)[i] = 0;
    }

    /* Initialize ring entries. */
    prev = txq->nb_tx_desc - 1;
    for (i = 0; i < txq->nb_tx_desc; i++) {
        volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);

        txd->wb.status = E1000_TXD_STAT_DD;
        txe[i].mbuf = NULL;
        txe[i].last_id = i;
        txe[prev].next_id = i;
        prev = i;
    }

    txq->txd_type = E1000_ADVTXD_DTYP_DATA;
    /* 82575 specific: each TX queue uses 2 HW contexts. */
    if (hw->mac.type == e1000_82575)
        txq->ctx_start = txq->queue_id * IGB_CTX_NUM;

    igb_reset_tx_queue_stat(txq);
}
int
eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
    uint16_t queue_idx,
    uint16_t nb_desc,
    unsigned int socket_id,
    const struct rte_eth_txconf *tx_conf)
{
    const struct rte_memzone *tz;
    struct igb_tx_queue *txq;
    struct e1000_hw *hw;
    uint32_t size;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /*
     * Validate number of transmit descriptors.
     * It must not exceed hardware maximum, and must be a multiple
     * of IGB_ALIGN.
     */
    if (((nb_desc * sizeof(union e1000_adv_tx_desc)) % IGB_ALIGN) != 0 ||
        (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
        return -(EINVAL);
    }

    /*
     * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
     * driver.
     */
    if (tx_conf->tx_free_thresh != 0)
        RTE_LOG(WARNING, PMD,
            "The tx_free_thresh parameter is not "
            "used for the 1G driver.");
    if (tx_conf->tx_rs_thresh != 0)
        RTE_LOG(WARNING, PMD,
            "The tx_rs_thresh parameter is not "
            "used for the 1G driver.");
    if (tx_conf->tx_thresh.wthresh == 0)
        RTE_LOG(WARNING, PMD,
            "To improve 1G driver performance, consider setting "
            "the TX WTHRESH value to 4, 8, or 16.");

    /* Free memory prior to re-allocation if needed. */
    if (dev->data->tx_queues[queue_idx] != NULL)
        igb_tx_queue_release(dev->data->tx_queues[queue_idx]);

    /* First allocate the TX queue data structure. */
    txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
        CACHE_LINE_SIZE);
    if (txq == NULL)
        return (-ENOMEM);

    /*
     * Allocate TX ring hardware descriptors. A memzone large enough to
     * handle the maximum ring size is allocated in order to allow for
     * resizing in later calls to the queue setup function.
     */
    size = sizeof(union e1000_adv_tx_desc) * IGB_MAX_RING_DESC;
    tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
        size, socket_id);
    if (tz == NULL) {
        igb_tx_queue_release(txq);
        return (-ENOMEM);
    }

    txq->nb_tx_desc = nb_desc;
    txq->pthresh = tx_conf->tx_thresh.pthresh;
    txq->hthresh = tx_conf->tx_thresh.hthresh;
    txq->wthresh = tx_conf->tx_thresh.wthresh;
    txq->queue_id = queue_idx;
    txq->port_id = dev->data->port_id;

    txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(queue_idx));
    txq->tx_ring_phys_addr = (uint64_t) tz->phys_addr;
    txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;

    size = sizeof(union e1000_adv_tx_desc) * nb_desc;

    /* Allocate software ring. */
    txq->sw_ring = rte_zmalloc("txq->sw_ring",
        sizeof(struct igb_tx_entry) * nb_desc,
        CACHE_LINE_SIZE);
    if (txq->sw_ring == NULL) {
        igb_tx_queue_release(txq);
        return (-ENOMEM);
    }
    PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
        txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);

    igb_reset_tx_queue(txq, dev);
    dev->tx_pkt_burst = eth_igb_xmit_pkts;
    dev->data->tx_queues[queue_idx] = txq;

    return (0);
}
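/*
 * Illustrative application-side call, not part of the driver (assumption:
 * the generic ethdev API of this DPDK generation), which lands in the
 * function above through the device operations table:
 *
 *     struct rte_eth_txconf txconf = {
 *         .tx_thresh = { .pthresh = 8, .hthresh = 1, .wthresh = 16 },
 *     };
 *     ret = rte_eth_tx_queue_setup(port_id, 0, 512,
 *                                  rte_socket_id(), &txconf);
 */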
static void
igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
{
    unsigned i;

    if (rxq->sw_ring != NULL) {
        for (i = 0; i < rxq->nb_rx_desc; i++) {
            if (rxq->sw_ring[i].mbuf != NULL) {
                rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
                rxq->sw_ring[i].mbuf = NULL;
            }
        }
    }
}

static void
igb_rx_queue_release(struct igb_rx_queue *rxq)
{
    igb_rx_queue_release_mbufs(rxq);
    rte_free(rxq->sw_ring);
    rte_free(rxq);
}
static int
igb_dev_rx_queue_alloc(struct rte_eth_dev *dev, uint16_t nb_queues)
{
    uint16_t i, old_nb_queues = dev->data->nb_rx_queues;
    struct igb_rx_queue **rxq;

    if (dev->data->rx_queues == NULL) {
        dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
            sizeof(struct igb_rx_queue *) * nb_queues,
            CACHE_LINE_SIZE);
        if (dev->data->rx_queues == NULL) {
            dev->data->nb_rx_queues = 0;
            return -(ENOMEM);
        }
    } else {
        for (i = nb_queues; i < old_nb_queues; i++) {
            igb_rx_queue_release(dev->data->rx_queues[i]);
            dev->data->rx_queues[i] = NULL;
        }
        if (nb_queues != old_nb_queues) {
            rxq = rte_realloc(dev->data->rx_queues,
                sizeof(struct igb_rx_queue *) * nb_queues,
                CACHE_LINE_SIZE);
            if (rxq == NULL)
                return -(ENOMEM);

            dev->data->rx_queues = rxq;
            if (nb_queues > old_nb_queues)
                memset(&(rxq[old_nb_queues]), 0,
                    sizeof(struct igb_rx_queue *) *
                    (nb_queues - old_nb_queues));
        }
    }
    dev->data->nb_rx_queues = nb_queues;

    return (0);
}
static void
igb_reset_rx_queue(struct igb_rx_queue *rxq)
{
    unsigned i;
    uint32_t size;

    /* Zero out HW ring memory. */
    size = sizeof(union e1000_adv_rx_desc) * rxq->nb_rx_desc;
    for (i = 0; i < size; i++) {
        ((volatile char *)rxq->rx_ring)[i] = 0;
    }

    rxq->rx_tail = 0;
    rxq->pkt_first_seg = NULL;
    rxq->pkt_last_seg = NULL;
}
int
eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
    uint16_t queue_idx,
    uint16_t nb_desc,
    unsigned int socket_id,
    const struct rte_eth_rxconf *rx_conf,
    struct rte_mempool *mp)
{
    const struct rte_memzone *rz;
    struct igb_rx_queue *rxq;
    struct e1000_hw *hw;
    unsigned int size;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /*
     * Validate number of receive descriptors.
     * It must not exceed hardware maximum, and must be a multiple
     * of IGB_ALIGN.
     */
    if (((nb_desc * sizeof(union e1000_adv_rx_desc)) % IGB_ALIGN) != 0 ||
        (nb_desc > IGB_MAX_RING_DESC) || (nb_desc < IGB_MIN_RING_DESC)) {
        return (-EINVAL);
    }

    /* Free memory prior to re-allocation if needed. */
    if (dev->data->rx_queues[queue_idx] != NULL) {
        igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
        dev->data->rx_queues[queue_idx] = NULL;
    }

    /* First allocate the RX queue data structure. */
    rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
        CACHE_LINE_SIZE);
    if (rxq == NULL)
        return (-ENOMEM);
    rxq->mb_pool = mp;
    rxq->nb_rx_desc = nb_desc;
    rxq->pthresh = rx_conf->rx_thresh.pthresh;
    rxq->hthresh = rx_conf->rx_thresh.hthresh;
    rxq->wthresh = rx_conf->rx_thresh.wthresh;
    rxq->rx_free_thresh = rx_conf->rx_free_thresh;
    rxq->queue_id = queue_idx;
    rxq->port_id = dev->data->port_id;
    rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
        0 : ETHER_CRC_LEN);

    /*
     * Allocate RX ring hardware descriptors. A memzone large enough to
     * handle the maximum ring size is allocated in order to allow for
     * resizing in later calls to the queue setup function.
     */
    size = sizeof(union e1000_adv_rx_desc) * IGB_MAX_RING_DESC;
    rz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx, size, socket_id);
    if (rz == NULL) {
        igb_rx_queue_release(rxq);
        return (-ENOMEM);
    }
    rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(queue_idx));
    rxq->rx_ring_phys_addr = (uint64_t) rz->phys_addr;
    rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;

    /* Allocate software ring. */
    rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
        sizeof(struct igb_rx_entry) * nb_desc,
        CACHE_LINE_SIZE);
    if (rxq->sw_ring == NULL) {
        igb_rx_queue_release(rxq);
        return (-ENOMEM);
    }
    PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64"\n",
        rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);

    dev->data->rx_queues[queue_idx] = rxq;
    igb_reset_rx_queue(rxq);

    return 0;
}
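/*
 * Illustrative application-side call, not part of the driver (assumption:
 * the generic ethdev API of this DPDK generation). The mempool supplies
 * the RX buffers that igb_alloc_rx_queue_mbufs() later pulls from:
 *
 *     struct rte_eth_rxconf rxconf = {
 *         .rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
 *         .rx_free_thresh = 32,
 *     };
 *     ret = rte_eth_rx_queue_setup(port_id, 0, 512,
 *                                  rte_socket_id(), &rxconf, mbuf_pool);
 */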
void
igb_dev_clear_queues(struct rte_eth_dev *dev)
{
    uint16_t i;
    struct igb_tx_queue *txq;
    struct igb_rx_queue *rxq;

    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        txq = dev->data->tx_queues[i];
        igb_tx_queue_release_mbufs(txq);
        igb_reset_tx_queue(txq, dev);
    }

    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        igb_rx_queue_release_mbufs(rxq);
        igb_reset_rx_queue(rxq);
    }
}
/**
 * Receive Side Scaling (RSS).
 * See section 7.1.1.7 in the following document:
 *     "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
 *
 * Principles:
 * The source and destination IP addresses of the IP header and the source and
 * destination ports of TCP/UDP headers, if any, of received packets are hashed
 * against a configurable random key to compute a 32-bit RSS hash result.
 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
 * RSS output index which is used as the RX queue index where to store the
 * received packets.
 * The following output is supplied in the RX write-back descriptor:
 *     - 32-bit result of the Microsoft RSS hash function,
 *     - 4-bit RSS type field.
 */

/*
 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
 * Used as the default key.
 */
static uint8_t rss_intel_key[40] = {
    0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
    0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
    0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
    0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
    0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
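/*
 * Illustrative sketch, not part of the original source: how the hardware
 * uses the RSS result described above. The 7 LSBs of the 32-bit hash pick
 * one of the 128 RETA entries, whose value is the destination RX queue.
 */
static inline uint8_t
igb_rss_reta_example(uint32_t rss_hash, const uint8_t reta[128])
{
    return reta[rss_hash & 0x7F]; /* 7 LSBs index the redirection table */
}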
static void
igb_rss_disable(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    uint32_t mrqc;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    mrqc = E1000_READ_REG(hw, E1000_MRQC);
    mrqc &= ~E1000_MRQC_ENABLE_MASK;
    E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
static void
igb_rss_configure(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    uint8_t *hash_key;
    uint32_t rss_key;
    uint32_t mrqc;
    uint16_t rss_hf;
    uint16_t shift;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    rss_hf = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf;
    if (rss_hf == 0) { /* Disable RSS. */
        igb_rss_disable(dev);
        return;
    }
    hash_key = dev->data->dev_conf.rx_adv_conf.rss_conf.rss_key;
    if (hash_key == NULL)
        hash_key = rss_intel_key; /* Default hash key. */

    /* Fill in RSS hash key. */
    for (i = 0; i < 10; i++) {
        rss_key  = hash_key[(i * 4)];
        rss_key |= hash_key[(i * 4) + 1] << 8;
        rss_key |= hash_key[(i * 4) + 2] << 16;
        rss_key |= hash_key[(i * 4) + 3] << 24;
        E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
    }

    /* Fill in redirection table. */
    shift = (hw->mac.type == e1000_82575) ? 6 : 0;
    for (i = 0; i < 128; i++) {
        union e1000_reta {
            uint32_t dword;
            uint8_t  bytes[4];
        } reta;
        uint8_t q_idx;

        q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
            i % dev->data->nb_rx_queues : 0);
        reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
        if ((i & 3) == 3)
            E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
    }

    /* Set configured hashing functions in MRQC register. */
    mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
    if (rss_hf & ETH_RSS_IPV4)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
    if (rss_hf & ETH_RSS_IPV4_TCP)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
    if (rss_hf & ETH_RSS_IPV6)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
    if (rss_hf & ETH_RSS_IPV6_EX)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
    if (rss_hf & ETH_RSS_IPV6_TCP)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
    if (rss_hf & ETH_RSS_IPV6_TCP_EX)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
    if (rss_hf & ETH_RSS_IPV4_UDP)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
    if (rss_hf & ETH_RSS_IPV6_UDP)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
    if (rss_hf & ETH_RSS_IPV6_UDP_EX)
        mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
    E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
}
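/*
 * Illustrative application-side configuration, not part of the driver
 * (assumption: ethdev API of this DPDK generation). With more than one RX
 * queue, eth_igb_rx_init() ends up calling igb_rss_configure(), which
 * reads exactly these fields:
 *
 *     struct rte_eth_conf port_conf;
 *     memset(&port_conf, 0, sizeof(port_conf));
 *     port_conf.rx_adv_conf.rss_conf.rss_key = NULL; // use rss_intel_key
 *     port_conf.rx_adv_conf.rss_conf.rss_hf =
 *         ETH_RSS_IPV4 | ETH_RSS_IPV4_TCP;
 *     rte_eth_dev_configure(port_id, nb_rx_queues, nb_tx_queues,
 *                           &port_conf);
 */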
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
    struct igb_rx_entry *rxe = rxq->sw_ring;
    uint64_t dma_addr;
    unsigned i;

    /* Initialize software ring entries. */
    for (i = 0; i < rxq->nb_rx_desc; i++) {
        volatile union e1000_adv_rx_desc *rxd;
        struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);

        if (mbuf == NULL) {
            PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
                "queue_id=%hu\n", rxq->queue_id);
            igb_rx_queue_release(rxq);
            return (-ENOMEM);
        }
        dma_addr =
            rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf));
        rxd = &rxq->rx_ring[i];
        rxd->read.hdr_addr = dma_addr;
        rxd->read.pkt_addr = dma_addr;
        rxe[i].mbuf = mbuf;
    }

    return 0;
}
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_rx_queue *rxq;
    struct rte_pktmbuf_pool_private *mbp_priv;
    uint32_t rctl;
    uint32_t rxcsum;
    uint32_t srrctl;
    uint16_t buf_size;
    uint16_t rctl_bsize;
    uint16_t i;
    int ret;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /*
     * Make sure receives are disabled while setting
     * up the descriptor ring.
     */
    rctl = E1000_READ_REG(hw, E1000_RCTL);
    E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

    /*
     * Configure support of jumbo frames, if any.
     */
    if (dev->data->dev_conf.rxmode.jumbo_frame == 1) {
        rctl |= E1000_RCTL_LPE;

        /* Set maximum packet length. */
        E1000_WRITE_REG(hw, E1000_RLPML,
            dev->data->dev_conf.rxmode.max_rx_pkt_len);
    } else
        rctl &= ~E1000_RCTL_LPE;
    /* Configure and enable each RX queue. */
    rctl_bsize = 0;
    dev->rx_pkt_burst = eth_igb_recv_pkts;
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        uint64_t bus_addr;
        uint32_t rxdctl;

        rxq = dev->data->rx_queues[i];

        /* Allocate buffers for descriptor rings and set up queue. */
        ret = igb_alloc_rx_queue_mbufs(rxq);
        if (ret) {
            igb_dev_clear_queues(dev);
            return ret;
        }

        /*
         * Reset crc_len in case it was changed after queue setup by a
         * call to VLAN offload configuration.
         */
        rxq->crc_len =
            (uint8_t)(dev->data->dev_conf.rxmode.hw_strip_crc ?
                0 : ETHER_CRC_LEN);

        bus_addr = rxq->rx_ring_phys_addr;
        E1000_WRITE_REG(hw, E1000_RDLEN(i),
            rxq->nb_rx_desc *
            sizeof(union e1000_adv_rx_desc));
        E1000_WRITE_REG(hw, E1000_RDBAH(i),
            (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

        srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

        /*
         * Configure RX buffer size.
         */
        mbp_priv = (struct rte_pktmbuf_pool_private *)
            ((char *)rxq->mb_pool + sizeof(struct rte_mempool));
        buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
            RTE_PKTMBUF_HEADROOM);
        if (buf_size >= 1024) {
            /*
             * Configure the BSIZEPACKET field of the SRRCTL
             * register of the queue.
             * Value is in 1 KB resolution, from 1 KB to 127 KB.
             * If this field is equal to 0b, then RCTL.BSIZE
             * determines the RX packet buffer size.
             */
            srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
                E1000_SRRCTL_BSIZEPKT_MASK);
            buf_size = (uint16_t) ((srrctl &
                E1000_SRRCTL_BSIZEPKT_MASK) <<
                E1000_SRRCTL_BSIZEPKT_SHIFT);

            if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
                dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
                dev->data->scattered_rx = 1;
            }
        } else {
            /*
             * Use BSIZE field of the device RCTL register.
             */
            if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
                rctl_bsize = buf_size;
            dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
            dev->data->scattered_rx = 1;
        }

        E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

        /* Enable this RX queue. */
        rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
        rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
        rxdctl &= 0xFFF00000;
        rxdctl |= (rxq->pthresh & 0x1F);
        rxdctl |= ((rxq->hthresh & 0x1F) << 8);
        rxdctl |= ((rxq->wthresh & 0x1F) << 16);
        E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
    }
    /*
     * Setup BSIZE field of RCTL register, if needed.
     * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
     * register, since the code above configures the SRRCTL register of
     * the RX queue in such a case.
     * All configurable sizes are:
     * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
     *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
     *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
     *  2048: rctl |= E1000_RCTL_SZ_2048;
     *  1024: rctl |= E1000_RCTL_SZ_1024;
     *   512: rctl |= E1000_RCTL_SZ_512;
     *   256: rctl |= E1000_RCTL_SZ_256;
     */
    if (rctl_bsize > 0) {
        if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
            rctl |= E1000_RCTL_SZ_512;
        else /* 256 <= buf_size < 512 - use 256 */
            rctl |= E1000_RCTL_SZ_256;
    }

    /*
     * Configure RSS if device configured with multiple RX queues.
     */
    if (dev->data->nb_rx_queues > 1)
        igb_rss_configure(dev);
    else
        igb_rss_disable(dev);
    /*
     * Setup the Checksum Register.
     * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
     */
    rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
    rxcsum |= E1000_RXCSUM_PCSD;

    /* Enable both L3/L4 RX checksum offload. */
    if (dev->data->dev_conf.rxmode.hw_ip_checksum)
        rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
    else
        rxcsum &= ~(E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
    E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

    /* Setup the Receive Control Register. */
    if (dev->data->dev_conf.rxmode.hw_strip_crc) {
        rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

        /* Set STRCRC bit in all queues for Powerville (i350). */
        if (hw->mac.type == e1000_i350) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
                dvmolr |= E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
            }
        }
    } else {
        rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

        /* Clear STRCRC bit in all queues for Powerville (i350). */
        if (hw->mac.type == e1000_i350) {
            for (i = 0; i < dev->data->nb_rx_queues; i++) {
                uint32_t dvmolr = E1000_READ_REG(hw, E1000_DVMOLR(i));
                dvmolr &= ~E1000_DVMOLR_STRCRC;
                E1000_WRITE_REG(hw, E1000_DVMOLR(i), dvmolr);
            }
        }
    }

    rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
    rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
        E1000_RCTL_RDMTS_HALF |
        (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

    /* Make sure VLAN Filters are off. */
    rctl &= ~E1000_RCTL_VFE;
    /* Don't store bad packets. */
    rctl &= ~E1000_RCTL_SBP;

    /* Enable Receives. */
    E1000_WRITE_REG(hw, E1000_RCTL, rctl);

    /*
     * Setup the HW Rx Head and Tail Descriptor Pointers.
     * This needs to be done after enable.
     */
    for (i = 0; i < dev->data->nb_rx_queues; i++) {
        rxq = dev->data->rx_queues[i];
        E1000_WRITE_REG(hw, E1000_RDH(i), 0);
        E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
    }

    return 0;
}
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/
void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
    struct e1000_hw *hw;
    struct igb_tx_queue *txq;
    uint32_t tctl;
    uint32_t txdctl;
    uint16_t i;

    hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    /* Setup the Base and Length of the TX Descriptor Rings. */
    for (i = 0; i < dev->data->nb_tx_queues; i++) {
        uint64_t bus_addr;

        txq = dev->data->tx_queues[i];
        bus_addr = txq->tx_ring_phys_addr;

        E1000_WRITE_REG(hw, E1000_TDLEN(i),
            txq->nb_tx_desc *
            sizeof(union e1000_adv_tx_desc));
        E1000_WRITE_REG(hw, E1000_TDBAH(i),
            (uint32_t)(bus_addr >> 32));
        E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

        /* Setup the HW Tx Head and Tail descriptor pointers. */
        E1000_WRITE_REG(hw, E1000_TDT(i), 0);
        E1000_WRITE_REG(hw, E1000_TDH(i), 0);

        /* Setup Transmit threshold registers. */
        txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
        txdctl |= txq->pthresh & 0x1F;
        txdctl |= ((txq->hthresh & 0x1F) << 8);
        txdctl |= ((txq->wthresh & 0x1F) << 16);
        txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
        E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
    }

    /* Program the Transmit Control Register. */
    tctl = E1000_READ_REG(hw, E1000_TCTL);
    tctl &= ~E1000_TCTL_CT;
    tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
        (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

    e1000_config_collision_dist(hw);

    /* This write will effectively turn on the transmit unit. */
    E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
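/*
 * Illustrative bring-up order, not part of the driver (assumption: the
 * generic ethdev start flow of this DPDK generation). eth_igb_rx_init()
 * and eth_igb_tx_init() run from the device-start path once every queue
 * has been set up:
 *
 *     rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *     rte_eth_rx_queue_setup(...);  // once per RX queue
 *     rte_eth_tx_queue_setup(...);  // once per TX queue
 *     rte_eth_dev_start(port_id);   // -> eth_igb_rx_init()/eth_igb_tx_init()
 */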