1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
40 #include <rte_string_fns.h>
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
49 #define IGB_TX_IEEE1588_TMST 0
51 /* Bit mask to indicate which bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
53 RTE_MBUF_F_TX_OUTER_IPV4 | \
54 RTE_MBUF_F_TX_IPV6 | \
55 RTE_MBUF_F_TX_IPV4 | \
56 RTE_MBUF_F_TX_VLAN | \
57 RTE_MBUF_F_TX_IP_CKSUM | \
58 RTE_MBUF_F_TX_L4_MASK | \
59 RTE_MBUF_F_TX_TCP_SEG | \
62 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
63 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
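/*
 * Any ol_flags bit set on a transmitted mbuf but not covered by
 * IGB_TX_OFFLOAD_MASK falls into IGB_TX_OFFLOAD_NOTSUP_MASK and is
 * treated as an unsupported offload request; eth_igb_prep_pkts()
 * below rejects such mbufs at tx_prepare time.
 */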
66 * Structure associated with each descriptor of the RX ring of a RX queue.
69 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
73 * Structure associated with each descriptor of the TX ring of a TX queue.
76 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
77 uint16_t next_id; /**< Index of next descriptor in ring. */
78 uint16_t last_id; /**< Index of last scattered descriptor. */
85 IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
89 * Structure associated with each RX queue.
92 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
93 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
94 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
95 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
96 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
97 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
98 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
99 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
100 uint16_t nb_rx_desc; /**< number of RX descriptors. */
101 uint16_t rx_tail; /**< current value of RDT register. */
102 uint16_t nb_rx_hold; /**< number of held free RX desc. */
103 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
104 uint16_t queue_id; /**< RX queue index. */
105 uint16_t reg_idx; /**< RX queue register index. */
106 uint16_t port_id; /**< Device port identifier. */
107 uint8_t pthresh; /**< Prefetch threshold register. */
108 uint8_t hthresh; /**< Host threshold register. */
109 uint8_t wthresh; /**< Write-back threshold register. */
110 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
111 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
112 uint32_t flags; /**< RX flags. */
113 uint64_t offloads; /**< offloads of RTE_ETH_RX_OFFLOAD_* */
114 const struct rte_memzone *mz;
118 * Hardware context number
120 enum igb_advctx_num {
121 IGB_CTX_0 = 0, /**< CTX0 */
122 IGB_CTX_1 = 1, /**< CTX1 */
123 IGB_CTX_NUM = 2, /**< CTX_NUM */
126 /** Offload features */
127 union igb_tx_offload {
130 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
131 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
132 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */
133 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
134 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
136 /* uint64_t unused:8; */
141 * Compare masks for igb_tx_offload.data; they must be
142 * kept in sync with the igb_tx_offload layout.
144 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
145 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
146 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
147 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
148 /** Mac + IP + TCP + Mss mask. */
149 #define TX_TSO_CMP_MASK \
150 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
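/*
 * Illustrative mapping of the bit-field layout above to the compare masks
 * (assuming the fields are allocated from the least significant bit
 * upward, which is what the mask values imply):
 *   l3_len(9) + l2_len(7) -> bits  0..15 -> TX_MACIP_LEN_CMP_MASK
 *   vlan_tci(16)          -> bits 16..31 -> TX_VLAN_CMP_MASK
 *   l4_len(8)             -> bits 32..39 -> TX_TCP_LEN_CMP_MASK
 *   tso_segsz(16)         -> bits 40..55 -> TX_TSO_MSS_CMP_MASK
 */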
153 * Structure used to check whether a new context descriptor needs to be built
155 struct igb_advctx_info {
156 uint64_t flags; /**< ol_flags related to context build. */
157 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
158 union igb_tx_offload tx_offload;
159 /** compare mask for tx offload. */
160 union igb_tx_offload tx_offload_mask;
164 * Structure associated with each TX queue.
166 struct igb_tx_queue {
167 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
168 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
169 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
170 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
171 uint32_t txd_type; /**< Device-specific TXD type */
172 uint16_t nb_tx_desc; /**< number of TX descriptors. */
173 uint16_t tx_tail; /**< Current value of TDT register. */
175 /**< Index of first used TX descriptor. */
176 uint16_t queue_id; /**< TX queue index. */
177 uint16_t reg_idx; /**< TX queue register index. */
178 uint16_t port_id; /**< Device port identifier. */
179 uint8_t pthresh; /**< Prefetch threshold register. */
180 uint8_t hthresh; /**< Host threshold register. */
181 uint8_t wthresh; /**< Write-back threshold register. */
183 /**< Currently used hardware context. */
185 /**< Start context position for transmit queue. */
186 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
187 /**< Hardware context history. */
188 uint64_t offloads; /**< offloads of RTE_ETH_TX_OFFLOAD_* */
189 const struct rte_memzone *mz;
193 #define RTE_PMD_USE_PREFETCH
196 #ifdef RTE_PMD_USE_PREFETCH
197 #define rte_igb_prefetch(p) rte_prefetch0(p)
199 #define rte_igb_prefetch(p) do {} while(0)
202 #ifdef RTE_PMD_PACKET_PREFETCH
203 #define rte_packet_prefetch(p) rte_prefetch1(p)
205 #define rte_packet_prefetch(p) do {} while(0)
209 * Macro for VMDq feature for 1 GbE NIC.
211 #define E1000_VMOLR_SIZE (8)
212 #define IGB_TSO_MAX_HDRLEN (512)
213 #define IGB_TSO_MAX_MSS (9216)
215 /*********************************************************************
219 **********************************************************************/
222 * There are some hardware limitations for TCP segmentation offload, so we
223 * should check whether the parameters are valid.
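* For example (illustrative values): a request with tso_segsz = 9500
* exceeds IGB_TSO_MAX_MSS (9216), so RTE_MBUF_F_TX_TCP_SEG is cleared
* and the request falls back to a plain RTE_MBUF_F_TX_TCP_CKSUM.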
225 static inline uint64_t
226 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
228 if (!(ol_req & RTE_MBUF_F_TX_TCP_SEG))
230 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
231 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
232 ol_req &= ~RTE_MBUF_F_TX_TCP_SEG;
233 ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
239 * Advanced context descriptors are almost the same between igb and ixgbe.
240 * This is kept as a separate function, looking for optimization opportunities here.
241 * Rework is required to go with the pre-defined values.
245 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
246 volatile struct e1000_adv_tx_context_desc *ctx_txd,
247 uint64_t ol_flags, union igb_tx_offload tx_offload)
249 uint32_t type_tucmd_mlhl;
250 uint32_t mss_l4len_idx;
251 uint32_t ctx_idx, ctx_curr;
252 uint32_t vlan_macip_lens;
253 union igb_tx_offload tx_offload_mask;
255 ctx_curr = txq->ctx_curr;
256 ctx_idx = ctx_curr + txq->ctx_start;
258 tx_offload_mask.data = 0;
261 /* Specify which HW CTX to upload. */
262 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
264 if (ol_flags & RTE_MBUF_F_TX_VLAN)
265 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
267 /* check if TCP segmentation is required for this packet */
268 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
269 /* implies IP cksum in IPv4 */
270 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
271 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
272 E1000_ADVTXD_TUCMD_L4T_TCP |
273 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
275 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
276 E1000_ADVTXD_TUCMD_L4T_TCP |
277 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
279 tx_offload_mask.data |= TX_TSO_CMP_MASK;
280 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
281 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
282 } else { /* no TSO, check if hardware checksum is needed */
283 if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
284 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
286 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
287 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
289 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
290 case RTE_MBUF_F_TX_UDP_CKSUM:
291 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
292 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293 mss_l4len_idx |= sizeof(struct rte_udp_hdr)
294 << E1000_ADVTXD_L4LEN_SHIFT;
296 case RTE_MBUF_F_TX_TCP_CKSUM:
297 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
298 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
299 mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
300 << E1000_ADVTXD_L4LEN_SHIFT;
302 case RTE_MBUF_F_TX_SCTP_CKSUM:
303 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
304 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
305 mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
306 << E1000_ADVTXD_L4LEN_SHIFT;
309 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
310 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
315 txq->ctx_cache[ctx_curr].flags = ol_flags;
316 txq->ctx_cache[ctx_curr].tx_offload.data =
317 tx_offload_mask.data & tx_offload.data;
318 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
320 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
321 vlan_macip_lens = (uint32_t)tx_offload.data;
322 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
323 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
324 ctx_txd->u.seqnum_seed = 0;
328 * Check which hardware context can be used. Use the existing match
329 * or create a new context descriptor.
331 static inline uint32_t
332 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
333 union igb_tx_offload tx_offload)
335 /* If match with the current context */
336 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
337 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
338 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
339 return txq->ctx_curr;
342 /* If match with the second context */
344 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
345 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
346 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
347 return txq->ctx_curr;
350 /* Mismatch: none of the cached contexts matches, a new one is needed */
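/*
 * Note: on a miss this helper returns IGB_CTX_NUM; the caller
 * (eth_igb_xmit_pkts) uses that value to decide that a fresh context
 * descriptor must be written before the data descriptors
 * (see the "new_ctx = (ctx == IGB_CTX_NUM)" test in the transmit loop).
 */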
354 static inline uint32_t
355 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
357 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
358 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
361 tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
362 tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
363 tmp |= l4_olinfo[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
367 static inline uint32_t
368 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
371 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
372 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
373 cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN) != 0];
374 cmdtype |= tso_cmd[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
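/*
 * The 2-entry lookup tables above (here and in tx_desc_cksum_flags_to_olinfo)
 * turn flag tests into array indexing instead of branches. For example,
 * ol_flags carrying both RTE_MBUF_F_TX_VLAN and RTE_MBUF_F_TX_TCP_SEG
 * yield cmdtype = E1000_ADVTXD_DCMD_VLE | E1000_ADVTXD_DCMD_TSE here.
 */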
379 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
382 struct igb_tx_queue *txq;
383 struct igb_tx_entry *sw_ring;
384 struct igb_tx_entry *txe, *txn;
385 volatile union e1000_adv_tx_desc *txr;
386 volatile union e1000_adv_tx_desc *txd;
387 struct rte_mbuf *tx_pkt;
388 struct rte_mbuf *m_seg;
389 uint64_t buf_dma_addr;
390 uint32_t olinfo_status;
391 uint32_t cmd_type_len;
400 uint32_t new_ctx = 0;
402 union igb_tx_offload tx_offload = {0};
405 sw_ring = txq->sw_ring;
407 tx_id = txq->tx_tail;
408 txe = &sw_ring[tx_id];
410 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
412 pkt_len = tx_pkt->pkt_len;
414 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
417 * The number of descriptors that must be allocated for a
418 * packet is the number of segments of that packet, plus 1
419 * Context Descriptor for the VLAN Tag Identifier, if any.
420 * Determine the last TX descriptor to allocate in the TX ring
421 * for the packet, starting from the current position (tx_id)
424 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
426 ol_flags = tx_pkt->ol_flags;
427 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
429 /* Check whether a Context Descriptor needs to be built. */
431 tx_offload.l2_len = tx_pkt->l2_len;
432 tx_offload.l3_len = tx_pkt->l3_len;
433 tx_offload.l4_len = tx_pkt->l4_len;
434 tx_offload.vlan_tci = tx_pkt->vlan_tci;
435 tx_offload.tso_segsz = tx_pkt->tso_segsz;
436 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
438 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
439 /* Only allocate a context descriptor if required */
440 new_ctx = (ctx == IGB_CTX_NUM);
441 ctx = txq->ctx_curr + txq->ctx_start;
442 tx_last = (uint16_t) (tx_last + new_ctx);
444 if (tx_last >= txq->nb_tx_desc)
445 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
447 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
448 " tx_first=%u tx_last=%u",
449 (unsigned) txq->port_id,
450 (unsigned) txq->queue_id,
456 * Check if there are enough free descriptors in the TX ring
457 * to transmit the next packet.
458 * This operation is based on the two following rules:
460 * 1- Only check that the last needed TX descriptor can be
461 * allocated (by construction, if that descriptor is free,
462 * all intermediate ones are also free).
464 * For this purpose, the index of the last TX descriptor
465 * used for a packet (the "last descriptor" of a packet)
466 * is recorded in the TX entries (the last one included)
467 * that are associated with all TX descriptors allocated
470 * 2- Avoid allocating the last free TX descriptor of the
471 * ring, in order to never set the TDT register with the
472 * same value stored in parallel by the NIC in the TDH
473 * register, which would make the TX engine of the NIC
474 * enter a deadlock situation.
476 * By extension, avoid allocating a free descriptor that
477 * belongs to the last set of free descriptors allocated
478 * to the same packet previously transmitted.
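* Illustrative walk-through (example values, not from the datasheet):
* with an 8-entry ring, tx_id = 5 and a 2-segment packet that also
* needs a new context descriptor, tx_last = 5 + 2 - 1 + 1 = 7, i.e.
* descriptors 5..7 are needed (one context + two data descriptors).
* The code below follows sw_ring[7].last_id -> next_id -> last_id and
* only proceeds if the DD bit is set in that final descriptor, meaning
* the NIC has written it back and the span can safely be reused.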
482 * The "last descriptor" of the previously sent packet, if any,
483 * that used the descriptor we now want to allocate as tx_last.
485 tx_end = sw_ring[tx_last].last_id;
488 * The next descriptor following that "last descriptor" in the
491 tx_end = sw_ring[tx_end].next_id;
494 * The "last descriptor" associated with that next descriptor.
496 tx_end = sw_ring[tx_end].last_id;
499 * Check that this descriptor is free.
501 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
508 * Set common flags of all TX Data Descriptors.
510 * The following bits must be set in all Data Descriptors:
511 * - E1000_ADVTXD_DTYP_DATA
512 * - E1000_ADVTXD_DCMD_DEXT
514 * The following bits must be set in the first Data Descriptor
515 * and are ignored in the other ones:
516 * - E1000_ADVTXD_DCMD_IFCS
517 * - E1000_ADVTXD_MAC_1588
518 * - E1000_ADVTXD_DCMD_VLE
520 * The following bits must only be set in the last Data
522 * - E1000_TXD_CMD_EOP
524 * The following bits can be set in any Data Descriptor, but
525 * are only set in the last Data Descriptor:
528 cmd_type_len = txq->txd_type |
529 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
530 if (tx_ol_req & RTE_MBUF_F_TX_TCP_SEG)
531 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
532 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
533 #if defined(RTE_LIBRTE_IEEE1588)
534 if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
535 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
538 /* Setup TX Advanced context descriptor if required */
540 volatile struct e1000_adv_tx_context_desc *
543 ctx_txd = (volatile struct
544 e1000_adv_tx_context_desc *)
547 txn = &sw_ring[txe->next_id];
548 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
550 if (txe->mbuf != NULL) {
551 rte_pktmbuf_free_seg(txe->mbuf);
555 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
557 txe->last_id = tx_last;
558 tx_id = txe->next_id;
562 /* Setup the TX Advanced Data Descriptor */
563 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
564 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
565 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
570 txn = &sw_ring[txe->next_id];
573 if (txe->mbuf != NULL)
574 rte_pktmbuf_free_seg(txe->mbuf);
578 * Set up transmit descriptor.
580 slen = (uint16_t) m_seg->data_len;
581 buf_dma_addr = rte_mbuf_data_iova(m_seg);
582 txd->read.buffer_addr =
583 rte_cpu_to_le_64(buf_dma_addr);
584 txd->read.cmd_type_len =
585 rte_cpu_to_le_32(cmd_type_len | slen);
586 txd->read.olinfo_status =
587 rte_cpu_to_le_32(olinfo_status);
588 txe->last_id = tx_last;
589 tx_id = txe->next_id;
592 } while (m_seg != NULL);
595 * The last packet data descriptor needs End Of Packet (EOP)
596 * and Report Status (RS).
598 txd->read.cmd_type_len |=
599 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
605 * Set the Transmit Descriptor Tail (TDT).
607 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
608 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
609 (unsigned) txq->port_id, (unsigned) txq->queue_id,
610 (unsigned) tx_id, (unsigned) nb_tx);
611 txq->tx_tail = tx_id;
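/*
 * Note: applications do not call this function directly; it is installed
 * as dev->tx_pkt_burst in eth_igb_tx_queue_setup() and reached through
 * rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts).
 */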
616 /*********************************************************************
620 **********************************************************************/
622 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
628 for (i = 0; i < nb_pkts; i++) {
631 /* Check some limitations for TSO in hardware */
632 if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
633 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
634 (m->l2_len + m->l3_len + m->l4_len >
635 IGB_TSO_MAX_HDRLEN)) {
640 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
645 #ifdef RTE_ETHDEV_DEBUG_TX
646 ret = rte_validate_tx_offload(m);
652 ret = rte_net_intel_cksum_prepare(m);
662 /*********************************************************************
666 **********************************************************************/
667 #define IGB_PACKET_TYPE_IPV4 0X01
668 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
669 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
670 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
671 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
672 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
673 #define IGB_PACKET_TYPE_IPV6 0X04
674 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
675 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
676 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
677 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
678 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
679 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
680 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
681 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
682 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
683 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
684 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
685 #define IGB_PACKET_TYPE_MAX 0X80
686 #define IGB_PACKET_TYPE_MASK 0X7F
687 #define IGB_PACKET_TYPE_SHIFT 0X04
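/*
 * The IGB_PACKET_TYPE_* values above compose bit-wise: 0x01 = IPv4,
 * 0x02 = IPv4 with extensions, 0x04 = IPv6, 0x08 = IPv6 with extensions,
 * 0x10 = TCP, 0x20 = UDP, 0x40 = SCTP. This is why the lookup table below
 * is sparse and indexed directly by the masked pkt_info field.
 */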
688 static inline uint32_t
689 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
691 static const uint32_t
692 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
693 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
695 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
696 RTE_PTYPE_L3_IPV4_EXT,
697 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
699 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
700 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
701 RTE_PTYPE_INNER_L3_IPV6,
702 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
703 RTE_PTYPE_L3_IPV6_EXT,
704 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
705 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
706 RTE_PTYPE_INNER_L3_IPV6_EXT,
707 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
708 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
709 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
710 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
711 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
712 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
713 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
714 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
715 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
716 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
717 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
718 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
719 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
720 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
721 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
722 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
723 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
724 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
725 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
726 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
727 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
728 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
729 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
730 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
731 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
732 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
733 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
734 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
736 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
737 return RTE_PTYPE_UNKNOWN;
739 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
741 return ptype_table[pkt_info];
744 static inline uint64_t
745 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
747 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : RTE_MBUF_F_RX_RSS_HASH;
749 #if defined(RTE_LIBRTE_IEEE1588)
750 static uint32_t ip_pkt_etqf_map[8] = {
751 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
755 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
756 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
758 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
759 if (hw->mac.type == e1000_i210)
760 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
762 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
770 static inline uint64_t
771 rx_desc_status_to_pkt_flags(uint32_t rx_status)
775 /* Check if VLAN present */
776 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
777 RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
779 #if defined(RTE_LIBRTE_IEEE1588)
780 if (rx_status & E1000_RXD_STAT_TMST)
781 pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
786 static inline uint64_t
787 rx_desc_error_to_pkt_flags(uint32_t rx_status)
790 * Bit 30: IPE, IPv4 checksum error
791 * Bit 29: L4I, L4I integrity error
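* After shifting the status down by E1000_RXD_ERR_CKSUM_BIT, the L4 error
* lands in bit 0 and the IP error in bit 1 of the table index (this is
* what the ordering of error_to_pkt_flags_map below implies). E.g. a
* descriptor reporting only an L4 checksum error selects entry 1:
* RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD.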
794 static uint64_t error_to_pkt_flags_map[4] = {
795 RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
796 RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
797 RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
798 RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
800 return error_to_pkt_flags_map[(rx_status >>
801 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
805 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
808 struct igb_rx_queue *rxq;
809 volatile union e1000_adv_rx_desc *rx_ring;
810 volatile union e1000_adv_rx_desc *rxdp;
811 struct igb_rx_entry *sw_ring;
812 struct igb_rx_entry *rxe;
813 struct rte_mbuf *rxm;
814 struct rte_mbuf *nmb;
815 union e1000_adv_rx_desc rxd;
818 uint32_t hlen_type_rss;
828 rx_id = rxq->rx_tail;
829 rx_ring = rxq->rx_ring;
830 sw_ring = rxq->sw_ring;
831 while (nb_rx < nb_pkts) {
833 * The order of operations here is important as the DD status
834 * bit must not be read after any other descriptor fields.
835 * rx_ring and rxdp are pointing to volatile data so the order
836 * of accesses cannot be reordered by the compiler. If they were
837 * not volatile, they could be reordered which could lead to
838 * using invalid descriptor fields when read from rxd.
840 rxdp = &rx_ring[rx_id];
841 staterr = rxdp->wb.upper.status_error;
842 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
849 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
850 * likely to be invalid and to be dropped by the various
851 * validation checks performed by the network stack.
853 * Allocate a new mbuf to replenish the RX ring descriptor.
854 * If the allocation fails:
855 * - arrange for that RX descriptor to be the first one
856 * being parsed the next time the receive function is
857 * invoked [on the same queue].
859 * - Stop parsing the RX ring and return immediately.
861 * This policy does not drop the packet received in the RX
862 * descriptor for which the allocation of a new mbuf failed.
863 * Thus, it allows that packet to be retrieved later if
864 * mbufs have been freed in the meantime.
865 * As a side effect, holding RX descriptors instead of
866 * systematically giving them back to the NIC may lead to
867 * RX ring exhaustion situations.
868 * However, the NIC can gracefully prevent such situations
869 * from happening by sending specific "back-pressure" flow control
870 * frames to its peer(s).
872 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
873 "staterr=0x%x pkt_len=%u",
874 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
875 (unsigned) rx_id, (unsigned) staterr,
876 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
878 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
880 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
881 "queue_id=%u", (unsigned) rxq->port_id,
882 (unsigned) rxq->queue_id);
883 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
888 rxe = &sw_ring[rx_id];
890 if (rx_id == rxq->nb_rx_desc)
893 /* Prefetch next mbuf while processing current one. */
894 rte_igb_prefetch(sw_ring[rx_id].mbuf);
897 * When the next RX descriptor is on a cache-line boundary,
898 * prefetch the next 4 RX descriptors and the next 8 pointers
901 if ((rx_id & 0x3) == 0) {
902 rte_igb_prefetch(&rx_ring[rx_id]);
903 rte_igb_prefetch(&sw_ring[rx_id]);
909 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
910 rxdp->read.hdr_addr = 0;
911 rxdp->read.pkt_addr = dma_addr;
914 * Initialize the returned mbuf.
915 * 1) setup generic mbuf fields:
916 * - number of segments,
919 * - RX port identifier.
920 * 2) integrate hardware offload data, if any:
922 * - IP checksum flag,
923 * - VLAN TCI, if any,
926 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
928 rxm->data_off = RTE_PKTMBUF_HEADROOM;
929 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
932 rxm->pkt_len = pkt_len;
933 rxm->data_len = pkt_len;
934 rxm->port = rxq->port_id;
936 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
937 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
940 * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
941 * set in the pkt_flags field and must be in CPU byte order.
943 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
944 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
945 rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
947 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
949 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
950 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
951 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
952 rxm->ol_flags = pkt_flags;
953 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
954 lo_dword.hs_rss.pkt_info);
957 * Store the mbuf address into the next entry of the array
958 * of returned packets.
960 rx_pkts[nb_rx++] = rxm;
962 rxq->rx_tail = rx_id;
965 * If the number of free RX descriptors is greater than the RX free
966 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
968 * Update the RDT with the value of the last processed RX descriptor
969 * minus 1, to guarantee that the RDT register is never equal to the
970 * RDH register, which creates a "full" ring situation from the
971 * hardware point of view...
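* For example, with nb_rx_desc = 512 and rx_id equal to 0, the value
* written to RDT below is 511, keeping the tail one descriptor behind
* the next slot the driver will process.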
973 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
974 if (nb_hold > rxq->rx_free_thresh) {
975 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
976 "nb_hold=%u nb_rx=%u",
977 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
978 (unsigned) rx_id, (unsigned) nb_hold,
980 rx_id = (uint16_t) ((rx_id == 0) ?
981 (rxq->nb_rx_desc - 1) : (rx_id - 1));
982 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
985 rxq->nb_rx_hold = nb_hold;
990 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
993 struct igb_rx_queue *rxq;
994 volatile union e1000_adv_rx_desc *rx_ring;
995 volatile union e1000_adv_rx_desc *rxdp;
996 struct igb_rx_entry *sw_ring;
997 struct igb_rx_entry *rxe;
998 struct rte_mbuf *first_seg;
999 struct rte_mbuf *last_seg;
1000 struct rte_mbuf *rxm;
1001 struct rte_mbuf *nmb;
1002 union e1000_adv_rx_desc rxd;
1003 uint64_t dma; /* Physical address of mbuf data buffer */
1005 uint32_t hlen_type_rss;
1015 rx_id = rxq->rx_tail;
1016 rx_ring = rxq->rx_ring;
1017 sw_ring = rxq->sw_ring;
1020 * Retrieve RX context of current packet, if any.
1022 first_seg = rxq->pkt_first_seg;
1023 last_seg = rxq->pkt_last_seg;
1025 while (nb_rx < nb_pkts) {
1028 * The order of operations here is important as the DD status
1029 * bit must not be read after any other descriptor fields.
1030 * rx_ring and rxdp are pointing to volatile data so the order
1031 * of accesses cannot be reordered by the compiler. If they were
1032 * not volatile, they could be reordered which could lead to
1033 * using invalid descriptor fields when read from rxd.
1035 rxdp = &rx_ring[rx_id];
1036 staterr = rxdp->wb.upper.status_error;
1037 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1044 * Allocate a new mbuf to replenish the RX ring descriptor.
1045 * If the allocation fails:
1046 * - arrange for that RX descriptor to be the first one
1047 * being parsed the next time the receive function is
1048 * invoked [on the same queue].
1050 * - Stop parsing the RX ring and return immediately.
1052 * This policy does not drop the packet received in the RX
1053 * descriptor for which the allocation of a new mbuf failed.
1054 * Thus, it allows that packet to be retrieved later if
1055 * mbufs have been freed in the meantime.
1056 * As a side effect, holding RX descriptors instead of
1057 * systematically giving them back to the NIC may lead to
1058 * RX ring exhaustion situations.
1059 * However, the NIC can gracefully prevent such situations
1060 * from happening by sending specific "back-pressure" flow control
1061 * frames to its peer(s).
1063 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1064 "staterr=0x%x data_len=%u",
1065 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1066 (unsigned) rx_id, (unsigned) staterr,
1067 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1069 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1071 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1072 "queue_id=%u", (unsigned) rxq->port_id,
1073 (unsigned) rxq->queue_id);
1074 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1079 rxe = &sw_ring[rx_id];
1081 if (rx_id == rxq->nb_rx_desc)
1084 /* Prefetch next mbuf while processing current one. */
1085 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1088 * When the next RX descriptor is on a cache-line boundary,
1089 * prefetch the next 4 RX descriptors and the next 8 pointers
1092 if ((rx_id & 0x3) == 0) {
1093 rte_igb_prefetch(&rx_ring[rx_id]);
1094 rte_igb_prefetch(&sw_ring[rx_id]);
1098 * Update RX descriptor with the physical address of the new
1099 * data buffer of the new allocated mbuf.
1103 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1104 rxdp->read.pkt_addr = dma;
1105 rxdp->read.hdr_addr = 0;
1108 * Set data length & data buffer address of mbuf.
1110 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1111 rxm->data_len = data_len;
1112 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1115 * If this is the first buffer of the received packet,
1116 * set the pointer to the first mbuf of the packet and
1117 * initialize its context.
1118 * Otherwise, update the total length and the number of segments
1119 * of the current scattered packet, and update the pointer to
1120 * the last mbuf of the current packet.
1122 if (first_seg == NULL) {
1124 first_seg->pkt_len = data_len;
1125 first_seg->nb_segs = 1;
1127 first_seg->pkt_len += data_len;
1128 first_seg->nb_segs++;
1129 last_seg->next = rxm;
1133 * If this is not the last buffer of the received packet,
1134 * update the pointer to the last mbuf of the current scattered
1135 * packet and continue to parse the RX ring.
1137 if (! (staterr & E1000_RXD_STAT_EOP)) {
1143 * This is the last buffer of the received packet.
1144 * If the CRC is not stripped by the hardware:
1145 * - Subtract the CRC length from the total packet length.
1146 * - If the last buffer only contains the whole CRC or a part
1147 * of it, free the mbuf associated to the last buffer.
1148 * If part of the CRC is also contained in the previous
1149 * mbuf, subtract the length of that CRC part from the
1150 * data length of the previous mbuf.
1153 if (unlikely(rxq->crc_len > 0)) {
1154 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
1155 if (data_len <= RTE_ETHER_CRC_LEN) {
1156 rte_pktmbuf_free_seg(rxm);
1157 first_seg->nb_segs--;
1158 last_seg->data_len = (uint16_t)
1159 (last_seg->data_len -
1160 (RTE_ETHER_CRC_LEN - data_len));
1161 last_seg->next = NULL;
1163 rxm->data_len = (uint16_t)
1164 (data_len - RTE_ETHER_CRC_LEN);
1168 * Initialize the first mbuf of the returned packet:
1169 * - RX port identifier,
1170 * - hardware offload data, if any:
1171 * - RSS flag & hash,
1172 * - IP checksum flag,
1173 * - VLAN TCI, if any,
1176 first_seg->port = rxq->port_id;
1177 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1180 * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
1181 * set in the pkt_flags field and must be in CPU byte order.
1183 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1184 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1185 first_seg->vlan_tci =
1186 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1188 first_seg->vlan_tci =
1189 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1191 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1192 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1193 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1194 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1195 first_seg->ol_flags = pkt_flags;
1196 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1197 lower.lo_dword.hs_rss.pkt_info);
1199 /* Prefetch data of first segment, if configured to do so. */
1200 rte_packet_prefetch((char *)first_seg->buf_addr +
1201 first_seg->data_off);
1204 * Store the mbuf address into the next entry of the array
1205 * of returned packets.
1207 rx_pkts[nb_rx++] = first_seg;
1210 * Setup receipt context for a new packet.
1216 * Record index of the next RX descriptor to probe.
1218 rxq->rx_tail = rx_id;
1221 * Save receive context.
1223 rxq->pkt_first_seg = first_seg;
1224 rxq->pkt_last_seg = last_seg;
1227 * If the number of free RX descriptors is greater than the RX free
1228 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1230 * Update the RDT with the value of the last processed RX descriptor
1231 * minus 1, to guarantee that the RDT register is never equal to the
1232 * RDH register, which creates a "full" ring situation from the
1233 * hardware point of view...
1235 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1236 if (nb_hold > rxq->rx_free_thresh) {
1237 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1238 "nb_hold=%u nb_rx=%u",
1239 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1240 (unsigned) rx_id, (unsigned) nb_hold,
1242 rx_id = (uint16_t) ((rx_id == 0) ?
1243 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1244 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1247 rxq->nb_rx_hold = nb_hold;
1252 * Maximum number of Ring Descriptors.
1254 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1255 * descriptors should meet the following condition:
1256 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
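* For example, assuming the 16-byte union e1000_adv_rx/tx_desc layout,
* 128 / 16 = 8, so the descriptor count must be a multiple of 8; this is
* the alignment that the IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in the
* queue setup functions below are meant to enforce.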
1260 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1264 if (txq->sw_ring != NULL) {
1265 for (i = 0; i < txq->nb_tx_desc; i++) {
1266 if (txq->sw_ring[i].mbuf != NULL) {
1267 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1268 txq->sw_ring[i].mbuf = NULL;
1275 igb_tx_queue_release(struct igb_tx_queue *txq)
1278 igb_tx_queue_release_mbufs(txq);
1279 rte_free(txq->sw_ring);
1280 rte_memzone_free(txq->mz);
1286 eth_igb_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1288 igb_tx_queue_release(dev->data->tx_queues[qid]);
1292 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1294 struct igb_tx_entry *sw_ring;
1295 volatile union e1000_adv_tx_desc *txr;
1296 uint16_t tx_first; /* First segment analyzed. */
1297 uint16_t tx_id; /* Current segment being processed. */
1298 uint16_t tx_last; /* Last segment in the current packet. */
1299 uint16_t tx_next; /* First segment of the next packet. */
1305 sw_ring = txq->sw_ring;
1308 /* tx_tail is the last sent packet on the sw_ring. Go to the end
1309 * of that packet (the last segment in the packet chain); the
1310 * segment after it is then the start of the oldest packet
1311 * in the sw_ring. This is the first packet whose mbufs we will
1312 * attempt to free.
1315 /* Get last segment in most recently added packet. */
1316 tx_first = sw_ring[txq->tx_tail].last_id;
1318 /* Get the next segment, which is the oldest segment in ring. */
1319 tx_first = sw_ring[tx_first].next_id;
1321 /* Set the current index to the first. */
1324 /* Loop through each packet. For each packet, verify that an
1325 * mbuf exists and that the last segment is free. If so, free
1329 tx_last = sw_ring[tx_id].last_id;
1331 if (sw_ring[tx_last].mbuf) {
1332 if (txr[tx_last].wb.status &
1333 E1000_TXD_STAT_DD) {
1334 /* Increment the number of packets
1339 /* Get the start of the next packet. */
1340 tx_next = sw_ring[tx_last].next_id;
1342 /* Loop through all segments in a
1346 if (sw_ring[tx_id].mbuf) {
1347 rte_pktmbuf_free_seg(
1348 sw_ring[tx_id].mbuf);
1349 sw_ring[tx_id].mbuf = NULL;
1350 sw_ring[tx_id].last_id = tx_id;
1353 /* Move to next segment. */
1354 tx_id = sw_ring[tx_id].next_id;
1356 } while (tx_id != tx_next);
1358 if (unlikely(count == (int)free_cnt))
1361 /* mbuf still in use, nothing left to
1367 /* There are multiple reasons to be here:
1368 * 1) All the packets on the ring have been
1369 * freed - tx_id is equal to tx_first
1370 * and some packets have been freed.
1372 * 2) The interface has not sent a ring's worth of
1373 * packets yet, so the segment after tail is
1374 * still empty. Or a previous call to this
1375 * function freed some of the segments but
1376 * not all, so there is a hole in the list.
1377 * Hopefully this is a rare case.
1378 * - Walk the list and find the next mbuf. If
1379 * there isn't one, then done.
1381 if (likely(tx_id == tx_first && count != 0))
1384 /* Walk the list and find the next mbuf, if any. */
1386 /* Move to next segment. */
1387 tx_id = sw_ring[tx_id].next_id;
1389 if (sw_ring[tx_id].mbuf)
1392 } while (tx_id != tx_first);
1394 /* Determine why the previous loop bailed. If there
1395 * is no mbuf, we are done.
1397 if (!sw_ring[tx_id].mbuf)
1406 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1408 return igb_tx_done_cleanup(txq, free_cnt);
1412 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1417 memset((void*)&txq->ctx_cache, 0,
1418 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1422 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1424 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1425 struct igb_tx_entry *txe = txq->sw_ring;
1427 struct e1000_hw *hw;
1429 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1430 /* Zero out HW ring memory */
1431 for (i = 0; i < txq->nb_tx_desc; i++) {
1432 txq->tx_ring[i] = zeroed_desc;
1435 /* Initialize ring entries */
1436 prev = (uint16_t)(txq->nb_tx_desc - 1);
1437 for (i = 0; i < txq->nb_tx_desc; i++) {
1438 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1440 txd->wb.status = E1000_TXD_STAT_DD;
1443 txe[prev].next_id = i;
1447 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1448 /* 82575 specific, each tx queue will use 2 hw contexts */
1449 if (hw->mac.type == e1000_82575)
1450 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1452 igb_reset_tx_queue_stat(txq);
1456 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1458 uint64_t tx_offload_capa;
1461 tx_offload_capa = RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
1462 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
1463 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
1464 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
1465 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
1466 RTE_ETH_TX_OFFLOAD_TCP_TSO |
1467 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
1469 return tx_offload_capa;
1473 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1475 uint64_t tx_queue_offload_capa;
1477 tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1479 return tx_queue_offload_capa;
1483 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1486 unsigned int socket_id,
1487 const struct rte_eth_txconf *tx_conf)
1489 const struct rte_memzone *tz;
1490 struct igb_tx_queue *txq;
1491 struct e1000_hw *hw;
1495 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1497 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1500 * Validate number of transmit descriptors.
1501 * It must not exceed the hardware maximum, and must be a multiple
1504 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1505 (nb_desc > E1000_MAX_RING_DESC) ||
1506 (nb_desc < E1000_MIN_RING_DESC)) {
1511 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1514 if (tx_conf->tx_free_thresh != 0)
1515 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1516 "used for the 1G driver.");
1517 if (tx_conf->tx_rs_thresh != 0)
1518 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1519 "used for the 1G driver.");
1520 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1521 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1522 "consider setting the TX WTHRESH value to 4, 8, "
1525 /* Free memory prior to re-allocation if needed */
1526 if (dev->data->tx_queues[queue_idx] != NULL) {
1527 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1528 dev->data->tx_queues[queue_idx] = NULL;
1531 /* First allocate the tx queue data structure */
1532 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1533 RTE_CACHE_LINE_SIZE);
1538 * Allocate TX ring hardware descriptors. A memzone large enough to
1539 * handle the maximum ring size is allocated in order to allow for
1540 * resizing in later calls to the queue setup function.
1542 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1543 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1544 E1000_ALIGN, socket_id);
1546 igb_tx_queue_release(txq);
1551 txq->nb_tx_desc = nb_desc;
1552 txq->pthresh = tx_conf->tx_thresh.pthresh;
1553 txq->hthresh = tx_conf->tx_thresh.hthresh;
1554 txq->wthresh = tx_conf->tx_thresh.wthresh;
1555 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1557 txq->queue_id = queue_idx;
1558 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1559 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1560 txq->port_id = dev->data->port_id;
1562 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1563 txq->tx_ring_phys_addr = tz->iova;
1565 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1566 /* Allocate software ring */
1567 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1568 sizeof(struct igb_tx_entry) * nb_desc,
1569 RTE_CACHE_LINE_SIZE);
1570 if (txq->sw_ring == NULL) {
1571 igb_tx_queue_release(txq);
1574 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1575 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1577 igb_reset_tx_queue(txq, dev);
1578 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1579 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1580 dev->data->tx_queues[queue_idx] = txq;
1581 txq->offloads = offloads;
1587 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1591 if (rxq->sw_ring != NULL) {
1592 for (i = 0; i < rxq->nb_rx_desc; i++) {
1593 if (rxq->sw_ring[i].mbuf != NULL) {
1594 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1595 rxq->sw_ring[i].mbuf = NULL;
1602 igb_rx_queue_release(struct igb_rx_queue *rxq)
1605 igb_rx_queue_release_mbufs(rxq);
1606 rte_free(rxq->sw_ring);
1607 rte_memzone_free(rxq->mz);
1613 eth_igb_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1615 igb_rx_queue_release(dev->data->rx_queues[qid]);
1619 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1621 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1624 /* Zero out HW ring memory */
1625 for (i = 0; i < rxq->nb_rx_desc; i++) {
1626 rxq->rx_ring[i] = zeroed_desc;
1630 rxq->pkt_first_seg = NULL;
1631 rxq->pkt_last_seg = NULL;
1635 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1637 uint64_t rx_offload_capa;
1638 struct e1000_hw *hw;
1640 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1642 rx_offload_capa = RTE_ETH_RX_OFFLOAD_VLAN_STRIP |
1643 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1644 RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1645 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1646 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1647 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
1648 RTE_ETH_RX_OFFLOAD_SCATTER |
1649 RTE_ETH_RX_OFFLOAD_RSS_HASH;
1651 if (hw->mac.type == e1000_i350 ||
1652 hw->mac.type == e1000_i210 ||
1653 hw->mac.type == e1000_i211)
1654 rx_offload_capa |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
1656 return rx_offload_capa;
1660 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1662 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1663 uint64_t rx_queue_offload_capa;
1665 switch (hw->mac.type) {
1666 case e1000_vfadapt_i350:
1668 * As only one Rx queue can be used, let the per-queue offloading
1669 * capability be the same as the per-port offloading capability
1670 * for convenience.
1672 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1675 rx_queue_offload_capa = 0;
1677 return rx_queue_offload_capa;
1681 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1684 unsigned int socket_id,
1685 const struct rte_eth_rxconf *rx_conf,
1686 struct rte_mempool *mp)
1688 const struct rte_memzone *rz;
1689 struct igb_rx_queue *rxq;
1690 struct e1000_hw *hw;
1694 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1696 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1699 * Validate number of receive descriptors.
1700 * It must not exceed the hardware maximum, and must be a multiple
1703 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1704 (nb_desc > E1000_MAX_RING_DESC) ||
1705 (nb_desc < E1000_MIN_RING_DESC)) {
1709 /* Free memory prior to re-allocation if needed */
1710 if (dev->data->rx_queues[queue_idx] != NULL) {
1711 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1712 dev->data->rx_queues[queue_idx] = NULL;
1715 /* First allocate the RX queue data structure. */
1716 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1717 RTE_CACHE_LINE_SIZE);
1720 rxq->offloads = offloads;
1722 rxq->nb_rx_desc = nb_desc;
1723 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1724 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1725 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1726 if (rxq->wthresh > 0 &&
1727 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1729 rxq->drop_en = rx_conf->rx_drop_en;
1730 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1731 rxq->queue_id = queue_idx;
1732 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1733 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1734 rxq->port_id = dev->data->port_id;
1735 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
1736 rxq->crc_len = RTE_ETHER_CRC_LEN;
1741 * Allocate RX ring hardware descriptors. A memzone large enough to
1742 * handle the maximum ring size is allocated in order to allow for
1743 * resizing in later calls to the queue setup function.
1745 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1746 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1747 E1000_ALIGN, socket_id);
1749 igb_rx_queue_release(rxq);
1754 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1755 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1756 rxq->rx_ring_phys_addr = rz->iova;
1757 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1759 /* Allocate software ring. */
1760 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1761 sizeof(struct igb_rx_entry) * nb_desc,
1762 RTE_CACHE_LINE_SIZE);
1763 if (rxq->sw_ring == NULL) {
1764 igb_rx_queue_release(rxq);
1767 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1768 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1770 dev->data->rx_queues[queue_idx] = rxq;
1771 igb_reset_rx_queue(rxq);
1777 eth_igb_rx_queue_count(void *rx_queue)
1779 #define IGB_RXQ_SCAN_INTERVAL 4
1780 volatile union e1000_adv_rx_desc *rxdp;
1781 struct igb_rx_queue *rxq;
1785 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1787 while ((desc < rxq->nb_rx_desc) &&
1788 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1789 desc += IGB_RXQ_SCAN_INTERVAL;
1790 rxdp += IGB_RXQ_SCAN_INTERVAL;
1791 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1792 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1793 desc - rxq->nb_rx_desc]);
1800 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1802 struct igb_rx_queue *rxq = rx_queue;
1803 volatile uint32_t *status;
1806 if (unlikely(offset >= rxq->nb_rx_desc))
1809 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1810 return RTE_ETH_RX_DESC_UNAVAIL;
1812 desc = rxq->rx_tail + offset;
1813 if (desc >= rxq->nb_rx_desc)
1814 desc -= rxq->nb_rx_desc;
1816 status = &rxq->rx_ring[desc].wb.upper.status_error;
1817 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1818 return RTE_ETH_RX_DESC_DONE;
1820 return RTE_ETH_RX_DESC_AVAIL;
1824 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1826 struct igb_tx_queue *txq = tx_queue;
1827 volatile uint32_t *status;
1830 if (unlikely(offset >= txq->nb_tx_desc))
1833 desc = txq->tx_tail + offset;
1834 if (desc >= txq->nb_tx_desc)
1835 desc -= txq->nb_tx_desc;
1837 status = &txq->tx_ring[desc].wb.status;
1838 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1839 return RTE_ETH_TX_DESC_DONE;
1841 return RTE_ETH_TX_DESC_FULL;
1845 igb_dev_clear_queues(struct rte_eth_dev *dev)
1848 struct igb_tx_queue *txq;
1849 struct igb_rx_queue *rxq;
1851 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1852 txq = dev->data->tx_queues[i];
1854 igb_tx_queue_release_mbufs(txq);
1855 igb_reset_tx_queue(txq, dev);
1859 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1860 rxq = dev->data->rx_queues[i];
1862 igb_rx_queue_release_mbufs(rxq);
1863 igb_reset_rx_queue(rxq);
1869 igb_dev_free_queues(struct rte_eth_dev *dev)
1873 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1874 eth_igb_rx_queue_release(dev, i);
1875 dev->data->rx_queues[i] = NULL;
1877 dev->data->nb_rx_queues = 0;
1879 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1880 eth_igb_tx_queue_release(dev, i);
1881 dev->data->tx_queues[i] = NULL;
1883 dev->data->nb_tx_queues = 0;
1887 * Receive Side Scaling (RSS).
1888 * See section 7.1.1.7 in the following document:
1889 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1892 * The source and destination IP addresses of the IP header and the source and
1893 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1894 * against a configurable random key to compute a 32-bit RSS hash result.
1895 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1896 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1897 * RSS output index which is used as the RX queue index in which to store the
1899 * The following output is supplied in the RX write-back descriptor:
1900 * - 32-bit result of the Microsoft RSS hash function,
1901 * - 4-bit RSS type field.
1905 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1906 * Used as the default key.
1908 static uint8_t rss_intel_key[40] = {
1909 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1910 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1911 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1912 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1913 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1917 igb_rss_disable(struct rte_eth_dev *dev)
1919 struct e1000_hw *hw;
1922 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1923 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1924 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1925 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1929 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1937 hash_key = rss_conf->rss_key;
1938 if (hash_key != NULL) {
1939 /* Fill in RSS hash key */
1940 for (i = 0; i < 10; i++) {
1941 rss_key = hash_key[(i * 4)];
1942 rss_key |= hash_key[(i * 4) + 1] << 8;
1943 rss_key |= hash_key[(i * 4) + 2] << 16;
1944 rss_key |= hash_key[(i * 4) + 3] << 24;
1945 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1949 /* Set configured hashing protocols in MRQC register */
1950 rss_hf = rss_conf->rss_hf;
1951 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1952 if (rss_hf & RTE_ETH_RSS_IPV4)
1953 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1954 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
1955 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1956 if (rss_hf & RTE_ETH_RSS_IPV6)
1957 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1958 if (rss_hf & RTE_ETH_RSS_IPV6_EX)
1959 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1960 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
1961 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1962 if (rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
1963 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1964 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
1965 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1966 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
1967 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1968 if (rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
1969 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1970 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
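/*
 * Key packing example: the first four bytes of a key, e.g. the default
 * rss_intel_key above (0x6D, 0x5A, 0x56, 0xDA), are written to RSSRK(0)
 * as the little-endian word 0xDA565A6D by the loop above.
 */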
1974 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1975 struct rte_eth_rss_conf *rss_conf)
1977 struct e1000_hw *hw;
1981 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1984 * Before changing anything, first check that the RSS update operation
1985 * does not attempt to disable RSS if RSS was enabled at
1986 * initialization time, nor attempt to enable RSS if RSS was
1987 * disabled at initialization time.
1989 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1990 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1991 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1992 if (rss_hf != 0) /* Enable RSS */
1994 return 0; /* Nothing to do */
1997 if (rss_hf == 0) /* Disable RSS */
1999 igb_hw_rss_hash_set(hw, rss_conf);
2003 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2004 struct rte_eth_rss_conf *rss_conf)
2006 struct e1000_hw *hw;
2013 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2014 hash_key = rss_conf->rss_key;
2015 if (hash_key != NULL) {
2016 /* Return RSS hash key */
2017 for (i = 0; i < 10; i++) {
2018 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2019 hash_key[(i * 4)] = rss_key & 0x000000FF;
2020 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2021 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2022 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2026 /* Get RSS functions configured in MRQC register */
2027 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2028 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2029 rss_conf->rss_hf = 0;
2033 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2034 rss_hf |= RTE_ETH_RSS_IPV4;
2035 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2036 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2037 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2038 rss_hf |= RTE_ETH_RSS_IPV6;
2039 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2040 rss_hf |= RTE_ETH_RSS_IPV6_EX;
2041 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2042 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP;
2043 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2044 rss_hf |= RTE_ETH_RSS_IPV6_TCP_EX;
2045 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2046 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2047 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2048 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP;
2049 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2050 rss_hf |= RTE_ETH_RSS_IPV6_UDP_EX;
2051 rss_conf->rss_hf = rss_hf;
2056 igb_rss_configure(struct rte_eth_dev *dev)
2058 struct rte_eth_rss_conf rss_conf;
2059 struct e1000_hw *hw;
2063 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2065 /* Fill in redirection table. */
2066 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2067 for (i = 0; i < 128; i++) {
2074 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2075 i % dev->data->nb_rx_queues : 0);
2076 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2078 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
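/*
 * Example of the fill pattern above: with 4 RX queues the 128 RETA
 * entries become 0,1,2,3,0,1,2,3,... because q_idx = i % nb_rx_queues;
 * four consecutive byte-wide entries share one 32-bit RETA register
 * (i >> 2 selects the register, i & 3 the byte within it).
 */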
2082 * Configure the RSS key and the RSS protocols used to compute
2083 * the RSS hash of input packets.
2085 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2086 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2087 igb_rss_disable(dev);
2090 if (rss_conf.rss_key == NULL)
2091 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2092 igb_hw_rss_hash_set(hw, &rss_conf);
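/*
 * Worked example, illustrative only: with four RX queues and a non-82575
 * MAC (shift == 0), the loop above fills the 128-entry redirection table
 * with the repeating pattern 0,1,2,3, four entries per RETA dword. The
 * local union below mirrors the one used by igb_rss_configure(); on a
 * little-endian host the first dword would be 0x03020100.
 */
static inline uint32_t
igb_example_reta_dword(uint16_t nb_rx_queues, uint32_t first_entry)
{
	union {
		uint32_t dword;
		uint8_t  bytes[4];
	} reta;
	uint32_t i;

	for (i = 0; i < 4; i++)
		reta.bytes[i] = (uint8_t)((first_entry + i) % nb_rx_queues);
	return reta.dword;
}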
2096 * Check whether the MAC type supports VMDq.
2097 * Return 1 if it does, otherwise return 0.
2100 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2102 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2104 switch (hw->mac.type) {
2125 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2131 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2133 struct rte_eth_vmdq_rx_conf *cfg;
2134 struct e1000_hw *hw;
2135 uint32_t mrqc, vt_ctl, vmolr, rctl;
2138 PMD_INIT_FUNC_TRACE();
2140 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2141 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2143 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2144 if (igb_is_vmdq_supported(dev) == 0)
2147 igb_rss_disable(dev);
2149 /* RCTL: enable VLAN filter */
2150 rctl = E1000_READ_REG(hw, E1000_RCTL);
2151 rctl |= E1000_RCTL_VFE;
2152 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2154 /* MRQC: enable vmdq */
2155 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2156 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2157 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2159 /* VTCTL: pool selection according to VLAN tag */
2160 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2161 if (cfg->enable_default_pool)
2162 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2163 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2164 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2166 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2167 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2168 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2169 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2172 if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_UNTAG)
2173 vmolr |= E1000_VMOLR_AUPE;
2174 if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_MC)
2175 vmolr |= E1000_VMOLR_ROMPE;
2176 if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_HASH_UC)
2177 vmolr |= E1000_VMOLR_ROPE;
2178 if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_BROADCAST)
2179 vmolr |= E1000_VMOLR_BAM;
2180 if (cfg->rx_mode & RTE_ETH_VMDQ_ACCEPT_MULTICAST)
2181 vmolr |= E1000_VMOLR_MPME;
2183 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2187 * VMOLR: set STRVLAN to 1 if IGMAC in VT_CTL is set to 1.
2188 * Both 82576 and 82580 support it.
2190 if (hw->mac.type != e1000_i350) {
2191 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2192 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2193 vmolr |= E1000_VMOLR_STRVLAN;
2194 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2198 /* VFTA - enable all vlan filters */
2199 for (i = 0; i < IGB_VFTA_SIZE; i++)
2200 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2202 /* VFRE: enable 8 pools for RX; both 82576 and i350 support it */
2203 if (hw->mac.type != e1000_82580)
2204 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2207 * RAH/RAL - allow pools to read specific mac addresses
2208 * In this case, all pools should be able to read from mac addr 0
2210 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2211 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2213 /* VLVF: set up filters for vlan tags as configured */
2214 for (i = 0; i < cfg->nb_pool_maps; i++) {
2215 /* set vlan id in VF register and set the valid bit */
2216 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
2217 (cfg->pool_map[i].vlan_id & RTE_ETH_VLAN_ID_MAX) |
2218 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
2219 E1000_VLVF_POOLSEL_MASK)));
2222 E1000_WRITE_FLUSH(hw);
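/*
 * Configuration sketch (application side; the VLAN ids, pool mapping and
 * rx_mode bits are illustrative assumptions, <rte_ethdev.h> assumed): the
 * cfg consumed above comes from rx_adv_conf.vmdq_rx_conf when the port is
 * configured with mq_mode RTE_ETH_MQ_RX_VMDQ_ONLY.
 */
static void
example_fill_vmdq_conf(struct rte_eth_conf *port_conf)
{
	struct rte_eth_vmdq_rx_conf *cfg =
		&port_conf->rx_adv_conf.vmdq_rx_conf;

	port_conf->rxmode.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY;
	cfg->nb_queue_pools = RTE_ETH_8_POOLS;
	cfg->enable_default_pool = 0;
	cfg->nb_pool_maps = 2;
	cfg->pool_map[0].vlan_id = 100;
	cfg->pool_map[0].pools = 1ULL << 0;	/* VLAN 100 -> pool 0 */
	cfg->pool_map[1].vlan_id = 200;
	cfg->pool_map[1].pools = 1ULL << 1;	/* VLAN 200 -> pool 1 */
	cfg->rx_mode = RTE_ETH_VMDQ_ACCEPT_UNTAG |
		       RTE_ETH_VMDQ_ACCEPT_BROADCAST;
}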
2228 /*********************************************************************
2230 * Enable receive unit.
2232 **********************************************************************/
2235 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2237 struct igb_rx_entry *rxe = rxq->sw_ring;
2241 /* Initialize software ring entries. */
2242 for (i = 0; i < rxq->nb_rx_desc; i++) {
2243 volatile union e1000_adv_rx_desc *rxd;
2244 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2247 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2248 "queue_id=%hu", rxq->queue_id);
2252 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2253 rxd = &rxq->rx_ring[i];
2254 rxd->read.hdr_addr = 0;
2255 rxd->read.pkt_addr = dma_addr;
2262 #define E1000_MRQC_DEF_Q_SHIFT (3)
2264 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2266 struct e1000_hw *hw =
2267 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2270 if (RTE_ETH_DEV_SRIOV(dev).active == RTE_ETH_8_POOLS) {
2272 * SRIOV active scheme
2273 * FIXME if support RSS together with VMDq & SRIOV
2275 mrqc = E1000_MRQC_ENABLE_VMDQ;
2276 /* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
2277 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2278 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2279 } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2281 * SRIOV inactive scheme
2283 switch (dev->data->dev_conf.rxmode.mq_mode) {
2284 case RTE_ETH_MQ_RX_RSS:
2285 igb_rss_configure(dev);
2287 case RTE_ETH_MQ_RX_VMDQ_ONLY:
2288 /* Configure general VMDq-only RX parameters */
2289 igb_vmdq_rx_hw_configure(dev);
2291 case RTE_ETH_MQ_RX_NONE:
2292 /* If mq_mode is none, disable RSS. */
2294 igb_rss_disable(dev);
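/*
 * Configuration sketch (application side; port and queue counts are
 * illustrative assumptions, <rte_ethdev.h> assumed): the mq_mode consumed
 * by the switch above is selected when the port is configured, e.g. to
 * enable RSS in the SR-IOV-inactive case.
 */
static int
example_configure_rss(uint16_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	struct rte_eth_conf port_conf = {
		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,	/* default hash key */
				.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
			},
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}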
2303 eth_igb_rx_init(struct rte_eth_dev *dev)
2305 struct rte_eth_rxmode *rxmode;
2306 struct e1000_hw *hw;
2307 struct igb_rx_queue *rxq;
2312 uint16_t rctl_bsize;
2317 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2321 * Make sure receives are disabled while setting
2322 * up the descriptor ring.
2324 rctl = E1000_READ_REG(hw, E1000_RCTL);
2325 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2327 rxmode = &dev->data->dev_conf.rxmode;
2330 * Configure support of jumbo frames, if any.
2332 max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
2333 if (dev->data->mtu > RTE_ETHER_MTU) {
2334 rctl |= E1000_RCTL_LPE;
2337 * Set the maximum packet length by default; it may be updated
2338 * later together with enabling/disabling dual VLAN.
2340 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
2341 max_len += VLAN_TAG_SIZE;
2343 E1000_WRITE_REG(hw, E1000_RLPML, max_len);
2345 rctl &= ~E1000_RCTL_LPE;
2347 /* Configure and enable each RX queue. */
2349 dev->rx_pkt_burst = eth_igb_recv_pkts;
2350 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2354 rxq = dev->data->rx_queues[i];
2358 * i350 and i354 VLAN packets have their VLAN tags byte-swapped.
2360 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2361 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2362 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2364 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2367 /* Allocate buffers for descriptor rings and set up queue */
2368 ret = igb_alloc_rx_queue_mbufs(rxq);
2373 * Reset crc_len in case it was changed after queue setup by a call to configure.
2376 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2377 rxq->crc_len = RTE_ETHER_CRC_LEN;
2381 bus_addr = rxq->rx_ring_phys_addr;
2382 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2384 sizeof(union e1000_adv_rx_desc));
2385 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2386 (uint32_t)(bus_addr >> 32));
2387 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2389 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2392 * Configure RX buffer size.
2394 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2395 RTE_PKTMBUF_HEADROOM);
2396 if (buf_size >= 1024) {
2398 * Configure the BSIZEPACKET field of the SRRCTL
2399 * register of the queue.
2400 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2401 * If this field is equal to 0b, then RCTL.BSIZE
2402 * determines the RX packet buffer size.
2404 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2405 E1000_SRRCTL_BSIZEPKT_MASK);
2406 buf_size = (uint16_t) ((srrctl &
2407 E1000_SRRCTL_BSIZEPKT_MASK) <<
2408 E1000_SRRCTL_BSIZEPKT_SHIFT);
2410 /* Add dual VLAN length to support dual VLAN frames */
2411 if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
2412 if (!dev->data->scattered_rx)
2414 "forcing scatter mode");
2415 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2416 dev->data->scattered_rx = 1;
2420 * Use BSIZE field of the device RCTL register.
2422 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2423 rctl_bsize = buf_size;
2424 if (!dev->data->scattered_rx)
2425 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2426 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2427 dev->data->scattered_rx = 1;
2430 /* Set the drop-enable bit if packets should be dropped when no descriptors are available */
2432 srrctl |= E1000_SRRCTL_DROP_EN;
2434 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2436 /* Enable this RX queue. */
2437 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2438 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2439 rxdctl &= 0xFFF00000;
2440 rxdctl |= (rxq->pthresh & 0x1F);
2441 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2442 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2443 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2446 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
2447 if (!dev->data->scattered_rx)
2448 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2449 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2450 dev->data->scattered_rx = 1;
2454 * Set up the BSIZE field of the RCTL register, if needed.
2455 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
2456 * register, since the code above configures the SRRCTL register of
2457 * the RX queue in such a case.
2458 * All configurable sizes are:
2459 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2460 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2461 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2462 * 2048: rctl |= E1000_RCTL_SZ_2048;
2463 * 1024: rctl |= E1000_RCTL_SZ_1024;
2464 * 512: rctl |= E1000_RCTL_SZ_512;
2465 * 256: rctl |= E1000_RCTL_SZ_256;
2467 if (rctl_bsize > 0) {
2468 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2469 rctl |= E1000_RCTL_SZ_512;
2470 else /* 256 <= buf_size < 512 - use 256 */
2471 rctl |= E1000_RCTL_SZ_256;
2475 * Configure RSS if the device is configured with multiple RX queues.
2477 igb_dev_mq_rx_configure(dev);
2479 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2480 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2483 * Setup the Checksum Register.
2484 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2486 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2487 rxcsum |= E1000_RXCSUM_PCSD;
2489 /* Enable both L3/L4 rx checksum offload */
2490 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM)
2491 rxcsum |= E1000_RXCSUM_IPOFL;
2493 rxcsum &= ~E1000_RXCSUM_IPOFL;
2494 if (rxmode->offloads &
2495 (RTE_ETH_RX_OFFLOAD_TCP_CKSUM | RTE_ETH_RX_OFFLOAD_UDP_CKSUM))
2496 rxcsum |= E1000_RXCSUM_TUOFL;
2498 rxcsum &= ~E1000_RXCSUM_TUOFL;
2499 if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
2500 rxcsum |= E1000_RXCSUM_CRCOFL;
2502 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2504 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
2506 /* Setup the Receive Control Register. */
2507 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) {
2508 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2510 /* clear STRCRC bit in all queues */
2511 if (hw->mac.type == e1000_i350 ||
2512 hw->mac.type == e1000_i210 ||
2513 hw->mac.type == e1000_i211 ||
2514 hw->mac.type == e1000_i354) {
2515 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2516 rxq = dev->data->rx_queues[i];
2517 uint32_t dvmolr = E1000_READ_REG(hw,
2518 E1000_DVMOLR(rxq->reg_idx));
2519 dvmolr &= ~E1000_DVMOLR_STRCRC;
2520 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2524 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2526 /* set STRCRC bit in all queues */
2527 if (hw->mac.type == e1000_i350 ||
2528 hw->mac.type == e1000_i210 ||
2529 hw->mac.type == e1000_i211 ||
2530 hw->mac.type == e1000_i354) {
2531 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2532 rxq = dev->data->rx_queues[i];
2533 uint32_t dvmolr = E1000_READ_REG(hw,
2534 E1000_DVMOLR(rxq->reg_idx));
2535 dvmolr |= E1000_DVMOLR_STRCRC;
2536 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2541 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2542 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2543 E1000_RCTL_RDMTS_HALF |
2544 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2546 /* Make sure VLAN Filters are off. */
2547 if (dev->data->dev_conf.rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_ONLY)
2548 rctl &= ~E1000_RCTL_VFE;
2549 /* Don't store bad packets. */
2550 rctl &= ~E1000_RCTL_SBP;
2552 /* Enable Receives. */
2553 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2556 * Setup the HW Rx Head and Tail Descriptor Pointers.
2557 * This needs to be done after enable.
2559 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2560 rxq = dev->data->rx_queues[i];
2561 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2562 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
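/*
 * Worked example, illustrative only: the SRRCTL.BSIZEPKT programming in
 * eth_igb_rx_init() rounds the per-queue buffer size down to 1 KB
 * granularity. Assuming the usual shift of 10, a data room of 2048 bytes
 * (after headroom) yields a field value of 2 and an effective RX buffer of
 * 2048 bytes, while 1600 bytes yields 1 and an effective buffer of 1024
 * bytes, which is why scatter mode may get forced for large frames.
 */
static inline uint16_t
igb_example_effective_buf_size(uint16_t buf_size)
{
	uint32_t srrctl = 0;

	srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
		   E1000_SRRCTL_BSIZEPKT_MASK);
	return (uint16_t)((srrctl & E1000_SRRCTL_BSIZEPKT_MASK) <<
			  E1000_SRRCTL_BSIZEPKT_SHIFT);
}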
2568 /*********************************************************************
2570 * Enable transmit unit.
2572 **********************************************************************/
2574 eth_igb_tx_init(struct rte_eth_dev *dev)
2576 struct e1000_hw *hw;
2577 struct igb_tx_queue *txq;
2582 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2584 /* Setup the Base and Length of the Tx Descriptor Rings. */
2585 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2587 txq = dev->data->tx_queues[i];
2588 bus_addr = txq->tx_ring_phys_addr;
2590 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2592 sizeof(union e1000_adv_tx_desc));
2593 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2594 (uint32_t)(bus_addr >> 32));
2595 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2597 /* Setup the HW Tx Head and Tail descriptor pointers. */
2598 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2599 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2601 /* Setup Transmit threshold registers. */
2602 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2603 txdctl |= txq->pthresh & 0x1F;
2604 txdctl |= ((txq->hthresh & 0x1F) << 8);
2605 txdctl |= ((txq->wthresh & 0x1F) << 16);
2606 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2607 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2610 /* Program the Transmit Control Register. */
2611 tctl = E1000_READ_REG(hw, E1000_TCTL);
2612 tctl &= ~E1000_TCTL_CT;
2613 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2614 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2616 e1000_config_collision_dist(hw);
2618 /* This write will effectively turn on the transmit unit. */
2619 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
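/*
 * Usage sketch (application side; the threshold values are illustrative
 * assumptions, <rte_ethdev.h> assumed): the pthresh/hthresh/wthresh values
 * packed into TXDCTL above originate from the rte_eth_txconf supplied at
 * TX queue setup time.
 */
static int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id,
		uint16_t nb_desc, unsigned int socket_id)
{
	struct rte_eth_txconf txconf = {
		.tx_thresh = {
			.pthresh = 8,	/* prefetch threshold */
			.hthresh = 1,	/* host threshold */
			.wthresh = 16,	/* write-back threshold */
		},
	};

	return rte_eth_tx_queue_setup(port_id, queue_id, nb_desc,
			socket_id, &txconf);
}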
2622 /*********************************************************************
2624 * Enable VF receive unit.
2626 **********************************************************************/
2628 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2630 struct e1000_hw *hw;
2631 struct igb_rx_queue *rxq;
2634 uint16_t rctl_bsize;
2639 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2642 max_len = dev->data->mtu + E1000_ETH_OVERHEAD;
2643 e1000_rlpml_set_vf(hw, (uint16_t)(max_len + VLAN_TAG_SIZE));
2645 /* Configure and enable each RX queue. */
2647 dev->rx_pkt_burst = eth_igb_recv_pkts;
2648 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2652 rxq = dev->data->rx_queues[i];
2656 * i350 VF loopback (LB) VLAN packets have their VLAN tags byte-swapped.
2658 if (hw->mac.type == e1000_vfadapt_i350) {
2659 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2660 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2662 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2665 /* Allocate buffers for descriptor rings and set up queue */
2666 ret = igb_alloc_rx_queue_mbufs(rxq);
2670 bus_addr = rxq->rx_ring_phys_addr;
2671 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2673 sizeof(union e1000_adv_rx_desc));
2674 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2675 (uint32_t)(bus_addr >> 32));
2676 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2678 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2681 * Configure RX buffer size.
2683 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2684 RTE_PKTMBUF_HEADROOM);
2685 if (buf_size >= 1024) {
2687 * Configure the BSIZEPACKET field of the SRRCTL
2688 * register of the queue.
2689 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2690 * If this field is equal to 0b, then RCTL.BSIZE
2691 * determines the RX packet buffer size.
2693 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2694 E1000_SRRCTL_BSIZEPKT_MASK);
2695 buf_size = (uint16_t) ((srrctl &
2696 E1000_SRRCTL_BSIZEPKT_MASK) <<
2697 E1000_SRRCTL_BSIZEPKT_SHIFT);
2699 /* Add dual VLAN length to support dual VLAN frames */
2700 if ((max_len + 2 * VLAN_TAG_SIZE) > buf_size) {
2701 if (!dev->data->scattered_rx)
2703 "forcing scatter mode");
2704 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2705 dev->data->scattered_rx = 1;
2709 * Use BSIZE field of the device RCTL register.
2711 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2712 rctl_bsize = buf_size;
2713 if (!dev->data->scattered_rx)
2714 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2715 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2716 dev->data->scattered_rx = 1;
2719 /* Set the drop-enable bit if packets should be dropped when no descriptors are available */
2721 srrctl |= E1000_SRRCTL_DROP_EN;
2723 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2725 /* Enable this RX queue. */
2726 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2727 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2728 rxdctl &= 0xFFF00000;
2729 rxdctl |= (rxq->pthresh & 0x1F);
2730 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2731 if (hw->mac.type == e1000_vfadapt) {
2733 * Workaround for 82576 VF erratum:
2734 * force WTHRESH to 1 to avoid
2735 * write-back sometimes not being triggered.
2738 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2741 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2742 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2745 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SCATTER) {
2746 if (!dev->data->scattered_rx)
2747 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2748 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2749 dev->data->scattered_rx = 1;
2753 * Setup the HW Rx Head and Tail Descriptor Pointers.
2754 * This needs to be done after enable.
2756 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2757 rxq = dev->data->rx_queues[i];
2758 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2759 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2765 /*********************************************************************
2767 * Enable VF transmit unit.
2769 **********************************************************************/
2771 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2773 struct e1000_hw *hw;
2774 struct igb_tx_queue *txq;
2778 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2780 /* Setup the Base and Length of the Tx Descriptor Rings. */
2781 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2784 txq = dev->data->tx_queues[i];
2785 bus_addr = txq->tx_ring_phys_addr;
2786 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2788 sizeof(union e1000_adv_tx_desc));
2789 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2790 (uint32_t)(bus_addr >> 32));
2791 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2793 /* Setup the HW Tx Head and Tail descriptor pointers. */
2794 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2795 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2797 /* Setup Transmit threshold registers. */
2798 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2799 txdctl |= txq->pthresh & 0x1F;
2800 txdctl |= ((txq->hthresh & 0x1F) << 8);
2801 if (hw->mac.type == e1000_82576) {
2803 * Workaround for 82576 VF erratum:
2804 * force WTHRESH to 1 to avoid
2805 * write-back sometimes not being triggered.
2808 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2811 txdctl |= ((txq->wthresh & 0x1F) << 16);
2812 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2813 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2819 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2820 struct rte_eth_rxq_info *qinfo)
2822 struct igb_rx_queue *rxq;
2824 rxq = dev->data->rx_queues[queue_id];
2826 qinfo->mp = rxq->mb_pool;
2827 qinfo->scattered_rx = dev->data->scattered_rx;
2828 qinfo->nb_desc = rxq->nb_rx_desc;
2830 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2831 qinfo->conf.rx_drop_en = rxq->drop_en;
2832 qinfo->conf.offloads = rxq->offloads;
2836 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2837 struct rte_eth_txq_info *qinfo)
2839 struct igb_tx_queue *txq;
2841 txq = dev->data->tx_queues[queue_id];
2843 qinfo->nb_desc = txq->nb_tx_desc;
2845 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2846 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2847 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2848 qinfo->conf.offloads = txq->offloads;
2852 igb_rss_conf_init(struct rte_eth_dev *dev,
2853 struct igb_rte_flow_rss_conf *out,
2854 const struct rte_flow_action_rss *in)
2856 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2858 if (in->key_len > RTE_DIM(out->key) ||
2859 ((hw->mac.type == e1000_82576) &&
2860 (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
2861 ((hw->mac.type != e1000_82576) &&
2862 (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
2864 out->conf = (struct rte_flow_action_rss){
2868 .key_len = in->key_len,
2869 .queue_num = in->queue_num,
2870 .key = memcpy(out->key, in->key, in->key_len),
2871 .queue = memcpy(out->queue, in->queue,
2872 sizeof(*in->queue) * in->queue_num),
2878 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2879 const struct rte_flow_action_rss *with)
2881 return (comp->func == with->func &&
2882 comp->level == with->level &&
2883 comp->types == with->types &&
2884 comp->key_len == with->key_len &&
2885 comp->queue_num == with->queue_num &&
2886 !memcmp(comp->key, with->key, with->key_len) &&
2887 !memcmp(comp->queue, with->queue,
2888 sizeof(*with->queue) * with->queue_num));
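/*
 * Usage sketch (rte_flow side; the queue list and hash types are
 * illustrative assumptions, <rte_flow.h> assumed): the fields compared
 * above come from an application-provided RSS action such as this one.
 */
static const uint16_t example_rss_queues[] = { 0, 1, 2, 3 };

static const struct rte_flow_action_rss example_rss_action = {
	.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	.level = 0,
	.types = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
	.key_len = 0,
	.key = NULL,	/* use the default 40-byte key */
	.queue_num = RTE_DIM(example_rss_queues),
	.queue = example_rss_queues,
};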
2892 igb_config_rss_filter(struct rte_eth_dev *dev,
2893 struct igb_rte_flow_rss_conf *conf, bool add)
2897 struct rte_eth_rss_conf rss_conf = {
2898 .rss_key = conf->conf.key_len ?
2899 (void *)(uintptr_t)conf->conf.key : NULL,
2900 .rss_key_len = conf->conf.key_len,
2901 .rss_hf = conf->conf.types,
2903 struct e1000_filter_info *filter_info =
2904 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2905 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2910 if (igb_action_rss_same(&filter_info->rss_info.conf,
2912 igb_rss_disable(dev);
2913 memset(&filter_info->rss_info, 0,
2914 sizeof(struct igb_rte_flow_rss_conf));
2920 if (filter_info->rss_info.conf.queue_num)
2923 /* Fill in redirection table. */
2924 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2925 for (i = 0, j = 0; i < 128; i++, j++) {
2932 if (j == conf->conf.queue_num)
2934 q_idx = conf->conf.queue[j];
2935 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2937 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2940 /* Configure the RSS key and the RSS protocols used to compute
2941 * the RSS hash of input packets.
2943 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2944 igb_rss_disable(dev);
2947 if (rss_conf.rss_key == NULL)
2948 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2949 igb_hw_rss_hash_set(hw, &rss_conf);
2951 if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))