1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
40 #include <rte_string_fns.h>
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
49 #define IGB_TX_IEEE1588_TMST 0
51 /* Bit Mask to indicate what bits are required for building the TX context */
52 #define IGB_TX_OFFLOAD_MASK ( \
63 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
64 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
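/*
 * Illustrative sketch (editorial addition, not part of the driver; the
 * helper name is hypothetical): a packet's ol_flags are supported by
 * this PMD only when no bit falls inside the NOTSUP mask. The same
 * test is performed inline by eth_igb_prep_pkts() below.
 */
static inline int
igb_tx_offload_is_supported(uint64_t ol_flags)
{
	return (ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) == 0;
}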
67 * Structure associated with each descriptor of the RX ring of a RX queue.
70 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
74 * Structure associated with each descriptor of the TX ring of a TX queue.
77 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
78 uint16_t next_id; /**< Index of next descriptor in ring. */
79 uint16_t last_id; /**< Index of last scattered descriptor. */
86 IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
90 * Structure associated with each RX queue.
93 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
94 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
95 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
96 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
97 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
98 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
99 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
100 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
101 uint16_t nb_rx_desc; /**< number of RX descriptors. */
102 uint16_t rx_tail; /**< current value of RDT register. */
103 uint16_t nb_rx_hold; /**< number of held free RX desc. */
104 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
105 uint16_t queue_id; /**< RX queue index. */
106 uint16_t reg_idx; /**< RX queue register index. */
107 uint16_t port_id; /**< Device port identifier. */
108 uint8_t pthresh; /**< Prefetch threshold register. */
109 uint8_t hthresh; /**< Host threshold register. */
110 uint8_t wthresh; /**< Write-back threshold register. */
111 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
112 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
113 uint32_t flags; /**< RX flags. */
114 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
118 * Hardware context number
120 enum igb_advctx_num {
121 IGB_CTX_0 = 0, /**< CTX0 */
122 IGB_CTX_1 = 1, /**< CTX1 */
123 IGB_CTX_NUM = 2, /**< CTX_NUM */
126 /** Offload features */
127 union igb_tx_offload {
130 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
131 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
132 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier(CPU order). */
133 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
134 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
136 /* uint64_t unused:8; */
141 * Compare mask for igb_tx_offload.data,
142 * should be in sync with igb_tx_offload layout.
144 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
145 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
146 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
147 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
148 /** Mac + IP + TCP + Mss mask. */
149 #define TX_TSO_CMP_MASK \
150 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
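/*
 * Sketch of how the compare masks above are used (hypothetical helper,
 * not part of the driver; assumes the union's raw 64-bit "data" view):
 * two offload descriptions are equivalent for context-reuse purposes
 * when the fields selected by the mask match.
 */
static inline int
igb_tx_offload_fields_match(union igb_tx_offload a,
			    union igb_tx_offload b,
			    uint64_t cmp_mask)
{
	return (a.data & cmp_mask) == (b.data & cmp_mask);
}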
153 * Structure to check whether a new context needs to be built.
155 struct igb_advctx_info {
156 uint64_t flags; /**< ol_flags related to context build. */
157 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
158 union igb_tx_offload tx_offload;
159 /** compare mask for tx offload. */
160 union igb_tx_offload tx_offload_mask;
164 * Structure associated with each TX queue.
166 struct igb_tx_queue {
167 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
168 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
169 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
170 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
171 uint32_t txd_type; /**< Device-specific TXD type */
172 uint16_t nb_tx_desc; /**< number of TX descriptors. */
173 uint16_t tx_tail; /**< Current value of TDT register. */
175 /**< Index of first used TX descriptor. */
176 uint16_t queue_id; /**< TX queue index. */
177 uint16_t reg_idx; /**< TX queue register index. */
178 uint16_t port_id; /**< Device port identifier. */
179 uint8_t pthresh; /**< Prefetch threshold register. */
180 uint8_t hthresh; /**< Host threshold register. */
181 uint8_t wthresh; /**< Write-back threshold register. */
183 /**< Current used hardware descriptor. */
185 /**< Start context position for transmit queue. */
186 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
187 /**< Hardware context history.*/
188 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
192 #define RTE_PMD_USE_PREFETCH
195 #ifdef RTE_PMD_USE_PREFETCH
196 #define rte_igb_prefetch(p) rte_prefetch0(p)
198 #define rte_igb_prefetch(p) do {} while(0)
201 #ifdef RTE_PMD_PACKET_PREFETCH
202 #define rte_packet_prefetch(p) rte_prefetch1(p)
204 #define rte_packet_prefetch(p) do {} while(0)
208 * Macro for VMDq feature for 1 GbE NIC.
210 #define E1000_VMOLR_SIZE (8)
211 #define IGB_TSO_MAX_HDRLEN (512)
212 #define IGB_TSO_MAX_MSS (9216)
214 /*********************************************************************
218 **********************************************************************/
221 *There are some hardware limitations for TCP segmentation offload (TSO),
222 *so we should check whether the parameters are valid.
224 static inline uint64_t
225 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
227 if (!(ol_req & PKT_TX_TCP_SEG))
229 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
230 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
231 ol_req &= ~PKT_TX_TCP_SEG;
232 ol_req |= PKT_TX_TCP_CKSUM;
238 * Advanced context descriptors are almost the same between igb/ixgbe.
239 * This is kept as a separate function to look for optimization opportunities here.
240 * Rework is required to go with the pre-defined values.
244 igbe_set_xmit_ctx(struct igb_tx_queue *txq,
245 volatile struct e1000_adv_tx_context_desc *ctx_txd,
246 uint64_t ol_flags, union igb_tx_offload tx_offload)
248 uint32_t type_tucmd_mlhl;
249 uint32_t mss_l4len_idx;
250 uint32_t ctx_idx, ctx_curr;
251 uint32_t vlan_macip_lens;
252 union igb_tx_offload tx_offload_mask;
254 ctx_curr = txq->ctx_curr;
255 ctx_idx = ctx_curr + txq->ctx_start;
257 tx_offload_mask.data = 0;
260 /* Specify which HW CTX to upload. */
261 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
263 if (ol_flags & PKT_TX_VLAN_PKT)
264 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
266 /* check if TCP segmentation is required for this packet */
267 if (ol_flags & PKT_TX_TCP_SEG) {
268 /* implies IP cksum in IPv4 */
269 if (ol_flags & PKT_TX_IP_CKSUM)
270 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
271 E1000_ADVTXD_TUCMD_L4T_TCP |
272 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
275 E1000_ADVTXD_TUCMD_L4T_TCP |
276 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
278 tx_offload_mask.data |= TX_TSO_CMP_MASK;
279 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
280 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
281 } else { /* no TSO, check if hardware checksum is needed */
282 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
283 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
285 if (ol_flags & PKT_TX_IP_CKSUM)
286 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
288 switch (ol_flags & PKT_TX_L4_MASK) {
289 case PKT_TX_UDP_CKSUM:
290 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
291 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
292 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
294 case PKT_TX_TCP_CKSUM:
295 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
296 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
297 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
299 case PKT_TX_SCTP_CKSUM:
300 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
301 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
302 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
305 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
306 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
311 txq->ctx_cache[ctx_curr].flags = ol_flags;
312 txq->ctx_cache[ctx_curr].tx_offload.data =
313 tx_offload_mask.data & tx_offload.data;
314 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
316 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
317 vlan_macip_lens = (uint32_t)tx_offload.data;
318 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
319 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
320 ctx_txd->seqnum_seed = 0;
324 * Check which hardware context can be used. Use the existing match
325 * or create a new context descriptor.
327 static inline uint32_t
328 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
329 union igb_tx_offload tx_offload)
331 /* If match with the current context */
332 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
333 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
334 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
335 return txq->ctx_curr;
338 /* If match with the second context */
340 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
341 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
342 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
343 return txq->ctx_curr;
346 /* Mismatch, use the previous context */
350 static inline uint32_t
351 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
353 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
354 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
357 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
358 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
359 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
363 static inline uint32_t
364 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
367 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
368 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
369 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
370 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
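/*
 * The two tables above implement a branch-free flag translation: a
 * boolean indexes a 2-entry array instead of taking a conditional
 * jump. A generic sketch of the idiom (hypothetical helper, editorial
 * addition):
 */
static inline uint32_t
flag_to_field(uint64_t ol_flags, uint64_t flag, uint32_t field)
{
	const uint32_t map[2] = {0, field};

	return map[(ol_flags & flag) != 0];
}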
375 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
378 struct igb_tx_queue *txq;
379 struct igb_tx_entry *sw_ring;
380 struct igb_tx_entry *txe, *txn;
381 volatile union e1000_adv_tx_desc *txr;
382 volatile union e1000_adv_tx_desc *txd;
383 struct rte_mbuf *tx_pkt;
384 struct rte_mbuf *m_seg;
385 uint64_t buf_dma_addr;
386 uint32_t olinfo_status;
387 uint32_t cmd_type_len;
396 uint32_t new_ctx = 0;
398 union igb_tx_offload tx_offload = {0};
401 sw_ring = txq->sw_ring;
403 tx_id = txq->tx_tail;
404 txe = &sw_ring[tx_id];
406 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
408 pkt_len = tx_pkt->pkt_len;
410 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
413 * The number of descriptors that must be allocated for a
414 * packet is the number of segments of that packet, plus 1
415 * Context Descriptor for the VLAN Tag Identifier, if any.
416 * Determine the last TX descriptor to allocate in the TX ring
417 * for the packet, starting from the current position (tx_id)
420 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
422 ol_flags = tx_pkt->ol_flags;
423 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
425 /* If a Context Descriptor needs to be built. */
427 tx_offload.l2_len = tx_pkt->l2_len;
428 tx_offload.l3_len = tx_pkt->l3_len;
429 tx_offload.l4_len = tx_pkt->l4_len;
430 tx_offload.vlan_tci = tx_pkt->vlan_tci;
431 tx_offload.tso_segsz = tx_pkt->tso_segsz;
432 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
434 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
435 /* Only allocate a context descriptor if required. */
436 new_ctx = (ctx == IGB_CTX_NUM);
437 ctx = txq->ctx_curr + txq->ctx_start;
438 tx_last = (uint16_t) (tx_last + new_ctx);
440 if (tx_last >= txq->nb_tx_desc)
441 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
443 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
444 " tx_first=%u tx_last=%u",
445 (unsigned) txq->port_id,
446 (unsigned) txq->queue_id,
452 * Check if there are enough free descriptors in the TX ring
453 * to transmit the next packet.
454 * This operation is based on the two following rules:
456 * 1- Only check that the last needed TX descriptor can be
457 * allocated (by construction, if that descriptor is free,
458 * all intermediate ones are also free).
460 * For this purpose, the index of the last TX descriptor
461 * used for a packet (the "last descriptor" of a packet)
462 * is recorded in the TX entries (the last one included)
463 * that are associated with all TX descriptors allocated
466 * 2- Avoid allocating the last free TX descriptor of the
467 * ring, in order to never set the TDT register with the
468 * same value stored in parallel by the NIC in the TDH
469 * register, which would make the TX engine of the NIC enter
470 * a deadlock situation.
472 * By extension, avoid allocating a free descriptor that
473 * belongs to the last set of free descriptors allocated
474 * to the same packet previously transmitted.
478 * The "last descriptor" of the previously sent packet, if any,
479 * that used the last descriptor we are about to allocate.
481 tx_end = sw_ring[tx_last].last_id;
484 * The next descriptor following that "last descriptor" in the
487 tx_end = sw_ring[tx_end].next_id;
490 * The "last descriptor" associated with that next descriptor.
492 tx_end = sw_ring[tx_end].last_id;
495 * Check that this descriptor is free.
497 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
504 * Set common flags of all TX Data Descriptors.
506 * The following bits must be set in all Data Descriptors:
507 * - E1000_ADVTXD_DTYP_DATA
508 * - E1000_ADVTXD_DCMD_DEXT
510 * The following bits must be set in the first Data Descriptor
511 * and are ignored in the other ones:
512 * - E1000_ADVTXD_DCMD_IFCS
513 * - E1000_ADVTXD_MAC_1588
514 * - E1000_ADVTXD_DCMD_VLE
516 * The following bits must only be set in the last Data
518 * - E1000_TXD_CMD_EOP
520 * The following bits can be set in any Data Descriptor, but
521 * are only set in the last Data Descriptor:
524 cmd_type_len = txq->txd_type |
525 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
526 if (tx_ol_req & PKT_TX_TCP_SEG)
527 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
528 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
529 #if defined(RTE_LIBRTE_IEEE1588)
530 if (ol_flags & PKT_TX_IEEE1588_TMST)
531 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
534 /* Setup TX Advanced context descriptor if required */
536 volatile struct e1000_adv_tx_context_desc *
539 ctx_txd = (volatile struct
540 e1000_adv_tx_context_desc *)
543 txn = &sw_ring[txe->next_id];
544 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
546 if (txe->mbuf != NULL) {
547 rte_pktmbuf_free_seg(txe->mbuf);
551 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
553 txe->last_id = tx_last;
554 tx_id = txe->next_id;
558 /* Setup the TX Advanced Data Descriptor */
559 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
560 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
561 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
566 txn = &sw_ring[txe->next_id];
569 if (txe->mbuf != NULL)
570 rte_pktmbuf_free_seg(txe->mbuf);
574 * Set up transmit descriptor.
576 slen = (uint16_t) m_seg->data_len;
577 buf_dma_addr = rte_mbuf_data_iova(m_seg);
578 txd->read.buffer_addr =
579 rte_cpu_to_le_64(buf_dma_addr);
580 txd->read.cmd_type_len =
581 rte_cpu_to_le_32(cmd_type_len | slen);
582 txd->read.olinfo_status =
583 rte_cpu_to_le_32(olinfo_status);
584 txe->last_id = tx_last;
585 tx_id = txe->next_id;
588 } while (m_seg != NULL);
591 * The last packet data descriptor needs End Of Packet (EOP)
592 * and Report Status (RS).
594 txd->read.cmd_type_len |=
595 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
601 * Set the Transmit Descriptor Tail (TDT).
603 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
604 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
605 (unsigned) txq->port_id, (unsigned) txq->queue_id,
606 (unsigned) tx_id, (unsigned) nb_tx);
607 txq->tx_tail = tx_id;
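/*
 * Application-side usage sketch (editorial addition; assumes the port
 * and queue are configured and started): rte_eth_tx_burst() dispatches
 * to eth_igb_xmit_pkts() for ports bound to this PMD, and a partial
 * send is normal when the ring is nearly full.
 */
static inline uint16_t
igb_example_send_all(uint16_t port_id, uint16_t queue_id,
		     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t sent = 0;

	/* A real application would bound the retries. */
	while (sent < nb_pkts)
		sent += rte_eth_tx_burst(port_id, queue_id,
					 &pkts[sent], nb_pkts - sent);
	return sent;
}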
612 /*********************************************************************
616 **********************************************************************/
618 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
624 for (i = 0; i < nb_pkts; i++) {
627 /* Check some hardware limitations for TSO */
628 if (m->ol_flags & PKT_TX_TCP_SEG)
629 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
630 (m->l2_len + m->l3_len + m->l4_len >
631 IGB_TSO_MAX_HDRLEN)) {
636 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
637 rte_errno = ENOTSUP; /* rte_errno takes positive errno values */
641 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
642 ret = rte_validate_tx_offload(m);
648 ret = rte_net_intel_cksum_prepare(m);
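/*
 * Usage sketch (hypothetical application code): when requesting TSO or
 * checksum offloads, call rte_eth_tx_prepare() before rte_eth_tx_burst()
 * so that eth_igb_prep_pkts() can validate the request and fix up the
 * checksum pseudo-headers.
 */
static inline uint16_t
igb_example_prepare_and_send(uint16_t port_id, uint16_t queue_id,
			     struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	/* On failure, pkts[nb_prep] is the first faulty packet. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}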
658 /*********************************************************************
662 **********************************************************************/
663 #define IGB_PACKET_TYPE_IPV4 0X01
664 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
665 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
666 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
667 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
668 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
669 #define IGB_PACKET_TYPE_IPV6 0X04
670 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
671 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
672 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
673 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
674 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
675 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
676 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
677 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
678 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
679 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
680 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
681 #define IGB_PACKET_TYPE_MAX 0X80
682 #define IGB_PACKET_TYPE_MASK 0X7F
683 #define IGB_PACKET_TYPE_SHIFT 0X04
684 static inline uint32_t
685 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
687 static const uint32_t
688 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
689 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
691 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
692 RTE_PTYPE_L3_IPV4_EXT,
693 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
695 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
696 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
697 RTE_PTYPE_INNER_L3_IPV6,
698 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
699 RTE_PTYPE_L3_IPV6_EXT,
700 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
701 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
702 RTE_PTYPE_INNER_L3_IPV6_EXT,
703 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
704 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
705 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
706 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
707 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
708 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
709 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
710 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
711 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
712 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
713 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
714 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
715 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
716 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
717 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
718 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
719 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
720 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
721 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
722 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
723 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
724 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
725 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
726 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
727 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
728 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
729 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
730 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
732 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
733 return RTE_PTYPE_UNKNOWN;
735 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
737 return ptype_table[pkt_info];
740 static inline uint64_t
741 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
743 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
745 #if defined(RTE_LIBRTE_IEEE1588)
746 static uint32_t ip_pkt_etqf_map[8] = {
747 0, 0, 0, PKT_RX_IEEE1588_PTP,
751 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
752 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
754 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
755 if (hw->mac.type == e1000_i210)
756 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
758 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
766 static inline uint64_t
767 rx_desc_status_to_pkt_flags(uint32_t rx_status)
771 /* Check if VLAN present */
772 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
773 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
775 #if defined(RTE_LIBRTE_IEEE1588)
776 if (rx_status & E1000_RXD_STAT_TMST)
777 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
782 static inline uint64_t
783 rx_desc_error_to_pkt_flags(uint32_t rx_status)
786 * Bit 30: IPE, IPv4 checksum error
787 * Bit 29: L4I, L4I integrity error
790 static uint64_t error_to_pkt_flags_map[4] = {
791 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
792 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
793 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
794 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
796 return error_to_pkt_flags_map[(rx_status >>
797 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
801 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
804 struct igb_rx_queue *rxq;
805 volatile union e1000_adv_rx_desc *rx_ring;
806 volatile union e1000_adv_rx_desc *rxdp;
807 struct igb_rx_entry *sw_ring;
808 struct igb_rx_entry *rxe;
809 struct rte_mbuf *rxm;
810 struct rte_mbuf *nmb;
811 union e1000_adv_rx_desc rxd;
814 uint32_t hlen_type_rss;
824 rx_id = rxq->rx_tail;
825 rx_ring = rxq->rx_ring;
826 sw_ring = rxq->sw_ring;
827 while (nb_rx < nb_pkts) {
829 * The order of operations here is important as the DD status
830 * bit must not be read after any other descriptor fields.
831 * rx_ring and rxdp are pointing to volatile data so the order
832 * of accesses cannot be reordered by the compiler. If they were
833 * not volatile, they could be reordered which could lead to
834 * using invalid descriptor fields when read from rxd.
836 rxdp = &rx_ring[rx_id];
837 staterr = rxdp->wb.upper.status_error;
838 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
845 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
846 * likely to be invalid and to be dropped by the various
847 * validation checks performed by the network stack.
849 * Allocate a new mbuf to replenish the RX ring descriptor.
850 * If the allocation fails:
851 * - arrange for that RX descriptor to be the first one
852 * being parsed the next time the receive function is
853 * invoked [on the same queue].
855 * - Stop parsing the RX ring and return immediately.
857 * This policy does not drop the packet received in the RX
858 * descriptor for which the allocation of a new mbuf failed.
859 * Thus, it allows that packet to be later retrieved if
860 * mbufs have been freed in the meantime.
861 * As a side effect, holding RX descriptors instead of
862 * systematically giving them back to the NIC may lead to
863 * RX ring exhaustion situations.
864 * However, the NIC can gracefully prevent such situations
865 * from happening by sending specific "back-pressure" flow control
866 * frames to its peer(s).
868 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
869 "staterr=0x%x pkt_len=%u",
870 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
871 (unsigned) rx_id, (unsigned) staterr,
872 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
874 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
876 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
877 "queue_id=%u", (unsigned) rxq->port_id,
878 (unsigned) rxq->queue_id);
879 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
884 rxe = &sw_ring[rx_id];
886 if (rx_id == rxq->nb_rx_desc)
889 /* Prefetch next mbuf while processing current one. */
890 rte_igb_prefetch(sw_ring[rx_id].mbuf);
893 * When next RX descriptor is on a cache-line boundary,
894 * prefetch the next 4 RX descriptors and the next 8 pointers
897 if ((rx_id & 0x3) == 0) {
898 rte_igb_prefetch(&rx_ring[rx_id]);
899 rte_igb_prefetch(&sw_ring[rx_id]);
905 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
906 rxdp->read.hdr_addr = 0;
907 rxdp->read.pkt_addr = dma_addr;
910 * Initialize the returned mbuf.
911 * 1) setup generic mbuf fields:
912 * - number of segments,
915 * - RX port identifier.
916 * 2) integrate hardware offload data, if any:
918 * - IP checksum flag,
919 * - VLAN TCI, if any,
922 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
924 rxm->data_off = RTE_PKTMBUF_HEADROOM;
925 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
928 rxm->pkt_len = pkt_len;
929 rxm->data_len = pkt_len;
930 rxm->port = rxq->port_id;
932 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
933 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
936 * The vlan_tci field is only valid when PKT_RX_VLAN is
937 * set in the pkt_flags field and must be in CPU byte order.
939 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
940 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
941 rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
943 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
945 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
946 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
947 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
948 rxm->ol_flags = pkt_flags;
949 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
950 lo_dword.hs_rss.pkt_info);
953 * Store the mbuf address into the next entry of the array
954 * of returned packets.
956 rx_pkts[nb_rx++] = rxm;
958 rxq->rx_tail = rx_id;
961 * If the number of free RX descriptors is greater than the RX free
962 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
964 * Update the RDT with the value of the last processed RX descriptor
965 * minus 1, to guarantee that the RDT register is never equal to the
966 * RDH register, which creates a "full" ring situation from the
967 * hardware point of view...
969 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
970 if (nb_hold > rxq->rx_free_thresh) {
971 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
972 "nb_hold=%u nb_rx=%u",
973 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
974 (unsigned) rx_id, (unsigned) nb_hold,
976 rx_id = (uint16_t) ((rx_id == 0) ?
977 (rxq->nb_rx_desc - 1) : (rx_id - 1));
978 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
981 rxq->nb_rx_hold = nb_hold;
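/*
 * Application-side usage sketch (hypothetical helper, editorial
 * addition): a poll-mode loop retrieves packets through
 * rte_eth_rx_burst(), which dispatches to eth_igb_recv_pkts() for
 * ports bound to this PMD.
 */
static inline void
igb_example_poll(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t i, nb;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* a real app would process them */
}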
986 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
989 struct igb_rx_queue *rxq;
990 volatile union e1000_adv_rx_desc *rx_ring;
991 volatile union e1000_adv_rx_desc *rxdp;
992 struct igb_rx_entry *sw_ring;
993 struct igb_rx_entry *rxe;
994 struct rte_mbuf *first_seg;
995 struct rte_mbuf *last_seg;
996 struct rte_mbuf *rxm;
997 struct rte_mbuf *nmb;
998 union e1000_adv_rx_desc rxd;
999 uint64_t dma; /* Physical address of mbuf data buffer */
1001 uint32_t hlen_type_rss;
1011 rx_id = rxq->rx_tail;
1012 rx_ring = rxq->rx_ring;
1013 sw_ring = rxq->sw_ring;
1016 * Retrieve RX context of current packet, if any.
1018 first_seg = rxq->pkt_first_seg;
1019 last_seg = rxq->pkt_last_seg;
1021 while (nb_rx < nb_pkts) {
1024 * The order of operations here is important as the DD status
1025 * bit must not be read after any other descriptor fields.
1026 * rx_ring and rxdp are pointing to volatile data so the order
1027 * of accesses cannot be reordered by the compiler. If they were
1028 * not volatile, they could be reordered which could lead to
1029 * using invalid descriptor fields when read from rxd.
1031 rxdp = &rx_ring[rx_id];
1032 staterr = rxdp->wb.upper.status_error;
1033 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1040 * Allocate a new mbuf to replenish the RX ring descriptor.
1041 * If the allocation fails:
1042 * - arrange for that RX descriptor to be the first one
1043 * being parsed the next time the receive function is
1044 * invoked [on the same queue].
1046 * - Stop parsing the RX ring and return immediately.
1048 * This policy does not drop the packet received in the RX
1049 * descriptor for which the allocation of a new mbuf failed.
1050 * Thus, it allows that packet to be later retrieved if
1051 * mbufs have been freed in the meantime.
1052 * As a side effect, holding RX descriptors instead of
1053 * systematically giving them back to the NIC may lead to
1054 * RX ring exhaustion situations.
1055 * However, the NIC can gracefully prevent such situations
1056 * from happening by sending specific "back-pressure" flow control
1057 * frames to its peer(s).
1059 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1060 "staterr=0x%x data_len=%u",
1061 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1062 (unsigned) rx_id, (unsigned) staterr,
1063 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1065 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1067 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1068 "queue_id=%u", (unsigned) rxq->port_id,
1069 (unsigned) rxq->queue_id);
1070 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1075 rxe = &sw_ring[rx_id];
1077 if (rx_id == rxq->nb_rx_desc)
1080 /* Prefetch next mbuf while processing current one. */
1081 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1084 * When next RX descriptor is on a cache-line boundary,
1085 * prefetch the next 4 RX descriptors and the next 8 pointers
1088 if ((rx_id & 0x3) == 0) {
1089 rte_igb_prefetch(&rx_ring[rx_id]);
1090 rte_igb_prefetch(&sw_ring[rx_id]);
1094 * Update the RX descriptor with the physical address of the
1095 * data buffer of the newly allocated mbuf.
1099 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1100 rxdp->read.pkt_addr = dma;
1101 rxdp->read.hdr_addr = 0;
1104 * Set data length & data buffer address of mbuf.
1106 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1107 rxm->data_len = data_len;
1108 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1111 * If this is the first buffer of the received packet,
1112 * set the pointer to the first mbuf of the packet and
1113 * initialize its context.
1114 * Otherwise, update the total length and the number of segments
1115 * of the current scattered packet, and update the pointer to
1116 * the last mbuf of the current packet.
1118 if (first_seg == NULL) {
1120 first_seg->pkt_len = data_len;
1121 first_seg->nb_segs = 1;
1123 first_seg->pkt_len += data_len;
1124 first_seg->nb_segs++;
1125 last_seg->next = rxm;
1129 * If this is not the last buffer of the received packet,
1130 * update the pointer to the last mbuf of the current scattered
1131 * packet and continue to parse the RX ring.
1133 if (! (staterr & E1000_RXD_STAT_EOP)) {
1139 * This is the last buffer of the received packet.
1140 * If the CRC is not stripped by the hardware:
1141 * - Subtract the CRC length from the total packet length.
1142 * - If the last buffer only contains the whole CRC or a part
1143 * of it, free the mbuf associated with the last buffer.
1144 * If part of the CRC is also contained in the previous
1145 * mbuf, subtract the length of that CRC part from the
1146 * data length of the previous mbuf.
1149 if (unlikely(rxq->crc_len > 0)) {
1150 first_seg->pkt_len -= ETHER_CRC_LEN;
1151 if (data_len <= ETHER_CRC_LEN) {
1152 rte_pktmbuf_free_seg(rxm);
1153 first_seg->nb_segs--;
1154 last_seg->data_len = (uint16_t)
1155 (last_seg->data_len -
1156 (ETHER_CRC_LEN - data_len));
1157 last_seg->next = NULL;
1160 (uint16_t) (data_len - ETHER_CRC_LEN);
1164 * Initialize the first mbuf of the returned packet:
1165 * - RX port identifier,
1166 * - hardware offload data, if any:
1167 * - RSS flag & hash,
1168 * - IP checksum flag,
1169 * - VLAN TCI, if any,
1172 first_seg->port = rxq->port_id;
1173 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1176 * The vlan_tci field is only valid when PKT_RX_VLAN is
1177 * set in the pkt_flags field and must be in CPU byte order.
1179 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1180 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1181 first_seg->vlan_tci =
1182 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1184 first_seg->vlan_tci =
1185 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1187 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1188 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1189 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1190 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1191 first_seg->ol_flags = pkt_flags;
1192 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1193 lower.lo_dword.hs_rss.pkt_info);
1195 /* Prefetch data of first segment, if configured to do so. */
1196 rte_packet_prefetch((char *)first_seg->buf_addr +
1197 first_seg->data_off);
1200 * Store the mbuf address into the next entry of the array
1201 * of returned packets.
1203 rx_pkts[nb_rx++] = first_seg;
1206 * Setup receipt context for a new packet.
1212 * Record index of the next RX descriptor to probe.
1214 rxq->rx_tail = rx_id;
1217 * Save receive context.
1219 rxq->pkt_first_seg = first_seg;
1220 rxq->pkt_last_seg = last_seg;
1223 * If the number of free RX descriptors is greater than the RX free
1224 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1226 * Update the RDT with the value of the last processed RX descriptor
1227 * minus 1, to guarantee that the RDT register is never equal to the
1228 * RDH register, which creates a "full" ring situation from the
1229 * hardware point of view...
1231 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1232 if (nb_hold > rxq->rx_free_thresh) {
1233 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1234 "nb_hold=%u nb_rx=%u",
1235 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1236 (unsigned) rx_id, (unsigned) nb_hold,
1238 rx_id = (uint16_t) ((rx_id == 0) ?
1239 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1240 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1243 rxq->nb_rx_hold = nb_hold;
1248 * Maximum number of Ring Descriptors.
1250 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1251 * descriptors should meet the following condition:
1252 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
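/*
 * Worked check of the rule above (hypothetical helper, editorial
 * addition): advanced descriptors are 16 bytes, so any ring length
 * that is a multiple of 8 satisfies (nb_desc * 16) % 128 == 0.
 */
static inline int
igb_ring_len_ok(uint16_t nb_desc)
{
	return (nb_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0;
}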
1256 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1260 if (txq->sw_ring != NULL) {
1261 for (i = 0; i < txq->nb_tx_desc; i++) {
1262 if (txq->sw_ring[i].mbuf != NULL) {
1263 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1264 txq->sw_ring[i].mbuf = NULL;
1271 igb_tx_queue_release(struct igb_tx_queue *txq)
1274 igb_tx_queue_release_mbufs(txq);
1275 rte_free(txq->sw_ring);
1281 eth_igb_tx_queue_release(void *txq)
1283 igb_tx_queue_release(txq);
1287 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1289 struct igb_tx_entry *sw_ring;
1290 volatile union e1000_adv_tx_desc *txr;
1291 uint16_t tx_first; /* First segment analyzed. */
1292 uint16_t tx_id; /* Current segment being processed. */
1293 uint16_t tx_last; /* Last segment in the current packet. */
1294 uint16_t tx_next; /* First segment of the next packet. */
1299 sw_ring = txq->sw_ring;
1303 * tx_tail is the last sent packet on the sw_ring. Go to the end
1304 * of that packet (the last segment in the packet chain) and
1305 * then the next segment will be the start of the oldest segment
1306 * in the sw_ring. This is the first packet that we will
1307 * attempt to free.
1310 /* Get last segment in most recently added packet. */
1311 tx_first = sw_ring[txq->tx_tail].last_id;
1313 /* Get the next segment, which is the oldest segment in ring. */
1314 tx_first = sw_ring[tx_first].next_id;
1316 /* Set the current index to the first. */
1320 * Loop through each packet. For each packet, verify that an
1321 * mbuf exists and that the last segment is free. If so, free
1325 tx_last = sw_ring[tx_id].last_id;
1327 if (sw_ring[tx_last].mbuf) {
1328 if (txr[tx_last].wb.status &
1329 E1000_TXD_STAT_DD) {
1331 * Increment the number of packets
1336 /* Get the start of the next packet. */
1337 tx_next = sw_ring[tx_last].next_id;
1340 * Loop through all segments in a
1344 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1345 sw_ring[tx_id].mbuf = NULL;
1346 sw_ring[tx_id].last_id = tx_id;
1348 /* Move to the next segment. */
1349 tx_id = sw_ring[tx_id].next_id;
1351 } while (tx_id != tx_next);
1353 if (unlikely(count == (int)free_cnt))
1357 * mbuf still in use, nothing left to
1363 * There are multiple reasons to be here:
1364 * 1) All the packets on the ring have been
1365 * freed - tx_id is equal to tx_first
1366 * and some packets have been freed.
1368 * 2) The interface has not sent a ring's worth of
1369 * packets yet, so the segment after tail is
1370 * still empty. Or a previous call to this
1371 * function freed some of the segments but
1372 * not all, so there is a hole in the list.
1373 * Hopefully this is a rare case.
1374 * - Walk the list and find the next mbuf. If
1375 * there isn't one, then done.
1377 if (likely((tx_id == tx_first) && (count != 0)))
1381 * Walk the list and find the next mbuf, if any.
1384 /* Move to the next segment. */
1385 tx_id = sw_ring[tx_id].next_id;
1387 if (sw_ring[tx_id].mbuf)
1390 } while (tx_id != tx_first);
1393 * Determine why the previous loop bailed. If there
1394 * is no mbuf, we are done.
1396 if (sw_ring[tx_id].mbuf == NULL)
1407 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1409 return igb_tx_done_cleanup(txq, free_cnt);
1413 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1418 memset((void*)&txq->ctx_cache, 0,
1419 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1423 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1425 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1426 struct igb_tx_entry *txe = txq->sw_ring;
1428 struct e1000_hw *hw;
1430 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1431 /* Zero out HW ring memory */
1432 for (i = 0; i < txq->nb_tx_desc; i++) {
1433 txq->tx_ring[i] = zeroed_desc;
1436 /* Initialize ring entries */
1437 prev = (uint16_t)(txq->nb_tx_desc - 1);
1438 for (i = 0; i < txq->nb_tx_desc; i++) {
1439 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1441 txd->wb.status = E1000_TXD_STAT_DD;
1444 txe[prev].next_id = i;
1448 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1449 /* 82575 specific, each tx queue will use 2 hw contexts */
1450 if (hw->mac.type == e1000_82575)
1451 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1453 igb_reset_tx_queue_stat(txq);
1457 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1459 uint64_t tx_offload_capa;
1462 tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1463 DEV_TX_OFFLOAD_IPV4_CKSUM |
1464 DEV_TX_OFFLOAD_UDP_CKSUM |
1465 DEV_TX_OFFLOAD_TCP_CKSUM |
1466 DEV_TX_OFFLOAD_SCTP_CKSUM |
1467 DEV_TX_OFFLOAD_TCP_TSO |
1468 DEV_TX_OFFLOAD_MULTI_SEGS;
1470 return tx_offload_capa;
1474 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1476 uint64_t tx_queue_offload_capa;
1478 tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1480 return tx_queue_offload_capa;
1484 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1487 unsigned int socket_id,
1488 const struct rte_eth_txconf *tx_conf)
1490 const struct rte_memzone *tz;
1491 struct igb_tx_queue *txq;
1492 struct e1000_hw *hw;
1496 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1498 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1501 * Validate number of transmit descriptors.
1502 * It must not exceed the hardware maximum, and must be a multiple
1505 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1506 (nb_desc > E1000_MAX_RING_DESC) ||
1507 (nb_desc < E1000_MIN_RING_DESC)) {
1512 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1515 if (tx_conf->tx_free_thresh != 0)
1516 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1517 "used for the 1G driver.");
1518 if (tx_conf->tx_rs_thresh != 0)
1519 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1520 "used for the 1G driver.");
1521 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1522 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1523 "consider setting the TX WTHRESH value to 4, 8, "
1526 /* Free memory prior to re-allocation if needed */
1527 if (dev->data->tx_queues[queue_idx] != NULL) {
1528 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1529 dev->data->tx_queues[queue_idx] = NULL;
1532 /* First allocate the tx queue data structure */
1533 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1534 RTE_CACHE_LINE_SIZE);
1539 * Allocate TX ring hardware descriptors. A memzone large enough to
1540 * handle the maximum ring size is allocated in order to allow for
1541 * resizing in later calls to the queue setup function.
1543 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1544 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1545 E1000_ALIGN, socket_id);
1547 igb_tx_queue_release(txq);
1551 txq->nb_tx_desc = nb_desc;
1552 txq->pthresh = tx_conf->tx_thresh.pthresh;
1553 txq->hthresh = tx_conf->tx_thresh.hthresh;
1554 txq->wthresh = tx_conf->tx_thresh.wthresh;
1555 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1557 txq->queue_id = queue_idx;
1558 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1559 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1560 txq->port_id = dev->data->port_id;
1562 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1563 txq->tx_ring_phys_addr = tz->iova;
1565 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1566 /* Allocate software ring */
1567 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1568 sizeof(struct igb_tx_entry) * nb_desc,
1569 RTE_CACHE_LINE_SIZE);
1570 if (txq->sw_ring == NULL) {
1571 igb_tx_queue_release(txq);
1574 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1575 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1577 igb_reset_tx_queue(txq, dev);
1578 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1579 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1580 dev->data->tx_queues[queue_idx] = txq;
1581 txq->offloads = offloads;
1587 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1591 if (rxq->sw_ring != NULL) {
1592 for (i = 0; i < rxq->nb_rx_desc; i++) {
1593 if (rxq->sw_ring[i].mbuf != NULL) {
1594 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1595 rxq->sw_ring[i].mbuf = NULL;
1602 igb_rx_queue_release(struct igb_rx_queue *rxq)
1605 igb_rx_queue_release_mbufs(rxq);
1606 rte_free(rxq->sw_ring);
1612 eth_igb_rx_queue_release(void *rxq)
1614 igb_rx_queue_release(rxq);
1618 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1620 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1623 /* Zero out HW ring memory */
1624 for (i = 0; i < rxq->nb_rx_desc; i++) {
1625 rxq->rx_ring[i] = zeroed_desc;
1629 rxq->pkt_first_seg = NULL;
1630 rxq->pkt_last_seg = NULL;
1634 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1636 uint64_t rx_offload_capa;
1639 rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1640 DEV_RX_OFFLOAD_VLAN_FILTER |
1641 DEV_RX_OFFLOAD_IPV4_CKSUM |
1642 DEV_RX_OFFLOAD_UDP_CKSUM |
1643 DEV_RX_OFFLOAD_TCP_CKSUM |
1644 DEV_RX_OFFLOAD_JUMBO_FRAME |
1645 DEV_RX_OFFLOAD_KEEP_CRC |
1646 DEV_RX_OFFLOAD_SCATTER;
1648 return rx_offload_capa;
1652 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1654 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1655 uint64_t rx_queue_offload_capa;
1657 switch (hw->mac.type) {
1658 case e1000_vfadapt_i350:
1660 * As only one Rx queue can be used, let the per-queue offload
1661 * capability be the same as the per-port offload capability
1662 * for convenience.
1664 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1667 rx_queue_offload_capa = 0;
1669 return rx_queue_offload_capa;
1673 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1676 unsigned int socket_id,
1677 const struct rte_eth_rxconf *rx_conf,
1678 struct rte_mempool *mp)
1680 const struct rte_memzone *rz;
1681 struct igb_rx_queue *rxq;
1682 struct e1000_hw *hw;
1686 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1688 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1691 * Validate number of receive descriptors.
1692 * It must not exceed the hardware maximum, and must be a multiple
1695 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1696 (nb_desc > E1000_MAX_RING_DESC) ||
1697 (nb_desc < E1000_MIN_RING_DESC)) {
1701 /* Free memory prior to re-allocation if needed */
1702 if (dev->data->rx_queues[queue_idx] != NULL) {
1703 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1704 dev->data->rx_queues[queue_idx] = NULL;
1707 /* First allocate the RX queue data structure. */
1708 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1709 RTE_CACHE_LINE_SIZE);
1712 rxq->offloads = offloads;
1714 rxq->nb_rx_desc = nb_desc;
1715 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1716 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1717 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1718 if (rxq->wthresh > 0 &&
1719 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1721 rxq->drop_en = rx_conf->rx_drop_en;
1722 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1723 rxq->queue_id = queue_idx;
1724 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1725 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1726 rxq->port_id = dev->data->port_id;
1727 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1728 rxq->crc_len = ETHER_CRC_LEN;
1733 * Allocate RX ring hardware descriptors. A memzone large enough to
1734 * handle the maximum ring size is allocated in order to allow for
1735 * resizing in later calls to the queue setup function.
1737 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1738 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1739 E1000_ALIGN, socket_id);
1741 igb_rx_queue_release(rxq);
1744 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1745 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1746 rxq->rx_ring_phys_addr = rz->iova;
1747 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1749 /* Allocate software ring. */
1750 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1751 sizeof(struct igb_rx_entry) * nb_desc,
1752 RTE_CACHE_LINE_SIZE);
1753 if (rxq->sw_ring == NULL) {
1754 igb_rx_queue_release(rxq);
1757 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1758 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1760 dev->data->rx_queues[queue_idx] = rxq;
1761 igb_reset_rx_queue(rxq);
1767 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1769 #define IGB_RXQ_SCAN_INTERVAL 4
1770 volatile union e1000_adv_rx_desc *rxdp;
1771 struct igb_rx_queue *rxq;
1774 rxq = dev->data->rx_queues[rx_queue_id];
1775 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1777 while ((desc < rxq->nb_rx_desc) &&
1778 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1779 desc += IGB_RXQ_SCAN_INTERVAL;
1780 rxdp += IGB_RXQ_SCAN_INTERVAL;
1781 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1782 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1783 desc - rxq->nb_rx_desc]);
1790 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1792 volatile union e1000_adv_rx_desc *rxdp;
1793 struct igb_rx_queue *rxq = rx_queue;
1796 if (unlikely(offset >= rxq->nb_rx_desc))
1798 desc = rxq->rx_tail + offset;
1799 if (desc >= rxq->nb_rx_desc)
1800 desc -= rxq->nb_rx_desc;
1802 rxdp = &rxq->rx_ring[desc];
1803 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1807 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1809 struct igb_rx_queue *rxq = rx_queue;
1810 volatile uint32_t *status;
1813 if (unlikely(offset >= rxq->nb_rx_desc))
1816 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1817 return RTE_ETH_RX_DESC_UNAVAIL;
1819 desc = rxq->rx_tail + offset;
1820 if (desc >= rxq->nb_rx_desc)
1821 desc -= rxq->nb_rx_desc;
1823 status = &rxq->rx_ring[desc].wb.upper.status_error;
1824 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1825 return RTE_ETH_RX_DESC_DONE;
1827 return RTE_ETH_RX_DESC_AVAIL;
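/*
 * Usage sketch (hypothetical application code): poll a ring position
 * through the generic ethdev API, which dispatches to
 * eth_igb_rx_descriptor_status() for ports bound to this PMD.
 */
static inline int
igb_example_desc_done(uint16_t port_id, uint16_t queue_id, uint16_t offset)
{
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
		RTE_ETH_RX_DESC_DONE;
}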
1831 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1833 struct igb_tx_queue *txq = tx_queue;
1834 volatile uint32_t *status;
1837 if (unlikely(offset >= txq->nb_tx_desc))
1840 desc = txq->tx_tail + offset;
1841 if (desc >= txq->nb_tx_desc)
1842 desc -= txq->nb_tx_desc;
1844 status = &txq->tx_ring[desc].wb.status;
1845 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1846 return RTE_ETH_TX_DESC_DONE;
1848 return RTE_ETH_TX_DESC_FULL;
1852 igb_dev_clear_queues(struct rte_eth_dev *dev)
1855 struct igb_tx_queue *txq;
1856 struct igb_rx_queue *rxq;
1858 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1859 txq = dev->data->tx_queues[i];
1861 igb_tx_queue_release_mbufs(txq);
1862 igb_reset_tx_queue(txq, dev);
1866 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1867 rxq = dev->data->rx_queues[i];
1869 igb_rx_queue_release_mbufs(rxq);
1870 igb_reset_rx_queue(rxq);
1876 igb_dev_free_queues(struct rte_eth_dev *dev)
1880 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1881 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1882 dev->data->rx_queues[i] = NULL;
1884 dev->data->nb_rx_queues = 0;
1886 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1887 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1888 dev->data->tx_queues[i] = NULL;
1890 dev->data->nb_tx_queues = 0;
1894 * Receive Side Scaling (RSS).
1895 * See section 7.1.1.7 in the following document:
1896 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1899 * The source and destination IP addresses of the IP header and the source and
1900 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1901 * against a configurable random key to compute a 32-bit RSS hash result.
1902 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1903 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1904 * RSS output index, which is used as the RX queue index in which to store the
1906 * The following output is supplied in the RX write-back descriptor:
1907 * - 32-bit result of the Microsoft RSS hash function,
1908 * - 4-bit RSS type field.
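/*
 * Sketch of the RETA lookup described above (hypothetical helper,
 * editorial addition): the seven LSBs of the 32-bit hash select one
 * of the 128 redirection-table entries.
 */
static inline uint8_t
igb_rss_reta_index(uint32_t rss_hash)
{
	return rss_hash & 0x7F;	/* 7 LSBs -> RETA[0..127] */
}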
1912 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1913 * Used as the default key.
1915 static uint8_t rss_intel_key[40] = {
1916 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1917 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1918 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1919 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1920 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1924 igb_rss_disable(struct rte_eth_dev *dev)
1926 struct e1000_hw *hw;
1929 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1930 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1931 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1932 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1936 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1944 hash_key = rss_conf->rss_key;
1945 if (hash_key != NULL) {
1946 /* Fill in RSS hash key */
1947 for (i = 0; i < 10; i++) {
1948 rss_key = hash_key[(i * 4)];
1949 rss_key |= hash_key[(i * 4) + 1] << 8;
1950 rss_key |= hash_key[(i * 4) + 2] << 16;
1951 rss_key |= hash_key[(i * 4) + 3] << 24;
1952 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1956 /* Set configured hashing protocols in MRQC register */
1957 rss_hf = rss_conf->rss_hf;
1958 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1959 if (rss_hf & ETH_RSS_IPV4)
1960 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1961 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1962 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1963 if (rss_hf & ETH_RSS_IPV6)
1964 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1965 if (rss_hf & ETH_RSS_IPV6_EX)
1966 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1967 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1968 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1969 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1970 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1971 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1972 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1973 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1974 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1975 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1976 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1977 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1981 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1982 struct rte_eth_rss_conf *rss_conf)
1984 struct e1000_hw *hw;
1988 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1991 * Before changing anything, first check that the RSS update operation
1992 * does not attempt to disable RSS, if RSS was enabled at
1993 * initialization time, or to enable RSS, if RSS was
1994 * disabled at initialization time.
1996 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1997 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1998 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1999 if (rss_hf != 0) /* Enable RSS */
2001 return 0; /* Nothing to do */
2004 if (rss_hf == 0) /* Disable RSS */
2006 igb_hw_rss_hash_set(hw, rss_conf);
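/*
 * Usage sketch (hypothetical application code): reprogram the RSS hash
 * protocols at runtime through the generic ethdev API, which ends up
 * in eth_igb_rss_hash_update().
 */
static inline int
igb_example_set_rss(uint16_t port_id)
{
	struct rte_eth_rss_conf conf = {
		.rss_key = NULL,	/* keep the current key */
		.rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP,
	};

	return rte_eth_dev_rss_hash_update(port_id, &conf);
}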
2010 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2011 struct rte_eth_rss_conf *rss_conf)
2013 struct e1000_hw *hw;
2020 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2021 hash_key = rss_conf->rss_key;
2022 if (hash_key != NULL) {
2023 /* Return RSS hash key */
2024 for (i = 0; i < 10; i++) {
2025 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2026 hash_key[(i * 4)] = rss_key & 0x000000FF;
2027 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2028 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2029 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2033 /* Get RSS functions configured in MRQC register */
2034 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2035 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2036 rss_conf->rss_hf = 0;
2040 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2041 rss_hf |= ETH_RSS_IPV4;
2042 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2043 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2044 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2045 rss_hf |= ETH_RSS_IPV6;
2046 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2047 rss_hf |= ETH_RSS_IPV6_EX;
2048 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2049 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2050 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2051 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2052 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2053 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2054 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2055 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2056 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2057 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2058 rss_conf->rss_hf = rss_hf;
2063 igb_rss_configure(struct rte_eth_dev *dev)
2065 struct rte_eth_rss_conf rss_conf;
2066 struct e1000_hw *hw;
2070 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2072 /* Fill in redirection table. */
2073 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2074 for (i = 0; i < 128; i++) {
2081 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2082 i % dev->data->nb_rx_queues : 0);
2083 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2085 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2089 * Configure the RSS key and the RSS protocols used to compute
2090 * the RSS hash of input packets.
2092 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2093 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2094 igb_rss_disable(dev);
2097 if (rss_conf.rss_key == NULL)
2098 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2099 igb_hw_rss_hash_set(hw, &rss_conf);
2103 * Check whether the MAC type supports VMDq.
2104 * Return 1 if it does; otherwise, return 0.
2107 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2109 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2111 switch (hw->mac.type) {
2132 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
static int
igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_vmdq_rx_conf *cfg;
	struct e1000_hw *hw;
	uint32_t mrqc, vt_ctl, vmolr, rctl;
	int i;

	PMD_INIT_FUNC_TRACE();

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;

	/* Check that the MAC type supports VMDq; 0 means not supported. */
	if (igb_is_vmdq_supported(dev) == 0)
		return -1;

	igb_rss_disable(dev);

	/* RCTL: enable VLAN filter */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/* MRQC: enable VMDq */
	mrqc = E1000_READ_REG(hw, E1000_MRQC);
	mrqc |= E1000_MRQC_ENABLE_VMDQ;
	E1000_WRITE_REG(hw, E1000_MRQC, mrqc);

	/* VTCTL: pool selection according to VLAN tag */
	vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
	if (cfg->enable_default_pool)
		vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
	vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
	E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);

	for (i = 0; i < E1000_VMOLR_SIZE; i++) {
		vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
		vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
			   E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
			   E1000_VMOLR_MPME);

		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
			vmolr |= E1000_VMOLR_AUPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
			vmolr |= E1000_VMOLR_ROMPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
			vmolr |= E1000_VMOLR_ROPE;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
			vmolr |= E1000_VMOLR_BAM;
		if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
			vmolr |= E1000_VMOLR_MPME;

		E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
	}

	/*
	 * VMOLR: set STRVLAN when IGMAC is set in VTCTL.
	 * Both 82576 and 82580 support it.
	 */
	if (hw->mac.type != e1000_i350) {
		for (i = 0; i < E1000_VMOLR_SIZE; i++) {
			vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
			vmolr |= E1000_VMOLR_STRVLAN;
			E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
		}
	}

	/* VFTA - enable all VLAN filters */
	for (i = 0; i < IGB_VFTA_SIZE; i++)
		E1000_WRITE_REG(hw, (E1000_VFTA + (i * 4)), UINT32_MAX);

	/* VFRE: enable all 8 RX pools; both 82576 and i350 support it */
	if (hw->mac.type != e1000_82580)
		E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);

	/*
	 * RAH/RAL - allow pools to read specific MAC addresses.
	 * In this case, all pools should be able to read from MAC addr 0.
	 */
	E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
	E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);

	/* VLVF: set up filters for VLAN tags as configured */
	for (i = 0; i < cfg->nb_pool_maps; i++) {
		/* Set the VLAN id in the VLVF register and the valid bit. */
		E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
			(cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) |
			((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
			E1000_VLVF_POOLSEL_MASK)));
	}

	E1000_WRITE_FLUSH(hw);

	return 0;
}

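/*
 * Illustrative sketch (not part of the driver): the application-side
 * configuration consumed by igb_vmdq_rx_hw_configure() above. Pool
 * counts, VLAN IDs and the function name are assumptions chosen for
 * the example.
 */
static __rte_unused void
example_vmdq_conf(struct rte_eth_conf *port_conf)
{
	struct rte_eth_vmdq_rx_conf *cfg =
		&port_conf->rx_adv_conf.vmdq_rx_conf;

	port_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
	cfg->nb_queue_pools = ETH_8_POOLS;
	cfg->enable_default_pool = 0;
	cfg->rx_mode = ETH_VMDQ_ACCEPT_UNTAG | ETH_VMDQ_ACCEPT_BROADCAST;
	/* Steer VLAN 100 to pool 0 and VLAN 101 to pool 1. */
	cfg->nb_pool_maps = 2;
	cfg->pool_map[0].vlan_id = 100;
	cfg->pool_map[0].pools = 1 << 0;
	cfg->pool_map[1].vlan_id = 101;
	cfg->pool_map[1].pools = 1 << 1;
}
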
/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/

static int
igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
{
	struct igb_rx_entry *rxe = rxq->sw_ring;
	uint64_t dma_addr;
	unsigned int i;

	/* Initialize software ring entries. */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		volatile union e1000_adv_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
				     "queue_id=%hu", rxq->queue_id);
			return -ENOMEM;
		}
		dma_addr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = &rxq->rx_ring[i];
		rxd->read.hdr_addr = 0;
		rxd->read.pkt_addr = dma_addr;
		rxe[i].mbuf = mbuf;
	}

	return 0;
}

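/*
 * Illustrative sketch (not part of the driver): creating the mbuf pool
 * from which the ring above is populated. The pool name, sizes and
 * socket are assumptions; the data room must cover RTE_PKTMBUF_HEADROOM
 * plus the desired RX buffer size (>= 1024 bytes to use
 * SRRCTL.BSIZEPACKET, see eth_igb_rx_init() below).
 */
static __rte_unused struct rte_mempool *
example_create_rx_pool(void)
{
	return rte_pktmbuf_pool_create("rx_pool_example",
			8192,			   /* number of mbufs */
			256,			   /* per-lcore cache size */
			0,			   /* no private area */
			RTE_MBUF_DEFAULT_BUF_SIZE, /* headroom + 2048B */
			rte_socket_id());
}
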
#define E1000_MRQC_DEF_Q_SHIFT	(3)

static int
igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw =
		E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t mrqc;

	if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
		/*
		 * SRIOV active scheme
		 * FIXME if support RSS together with VMDq & SRIOV
		 */
		mrqc = E1000_MRQC_ENABLE_VMDQ;
		/* 011b Def_Q ignore, according to VT_CTL.DEF_PL */
		mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
	} else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
		/*
		 * SRIOV inactive scheme
		 */
		switch (dev->data->dev_conf.rxmode.mq_mode) {
		case ETH_MQ_RX_RSS:
			igb_rss_configure(dev);
			break;
		case ETH_MQ_RX_VMDQ_ONLY:
			/* Configure general VMDq-only RX parameters. */
			igb_vmdq_rx_hw_configure(dev);
			break;
		case ETH_MQ_RX_NONE:
			/* If mq_mode is none, disable RSS. */
		default:
			igb_rss_disable(dev);
			break;
		}
	}

	return 0;
}

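/*
 * Illustrative sketch (not part of the driver): selecting the RX
 * multi-queue mode handled by igb_dev_mq_rx_configure() above. The
 * hash functions and function name are assumptions for the example.
 */
static __rte_unused void
example_select_mq_mode(struct rte_eth_conf *port_conf)
{
	/* RSS across all configured RX queues... */
	port_conf->rxmode.mq_mode = ETH_MQ_RX_RSS;
	port_conf->rx_adv_conf.rss_conf.rss_key = NULL; /* default key */
	port_conf->rx_adv_conf.rss_conf.rss_hf =
		ETH_RSS_IPV4 | ETH_RSS_IPV6;
	/* ...or no multi-queue at all: ETH_MQ_RX_NONE disables RSS. */
}
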
int
eth_igb_rx_init(struct rte_eth_dev *dev)
{
	struct rte_eth_rxmode *rxmode;
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t rctl;
	uint32_t rxcsum;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	srrctl = 0;

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring.
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	rxmode = &dev->data->dev_conf.rxmode;

	/*
	 * Configure support of jumbo frames, if any.
	 */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
		rctl |= E1000_RCTL_LPE;

		/*
		 * Set maximum packet length by default; it may be updated
		 * together with enabling/disabling dual VLAN.
		 */
		E1000_WRITE_REG(hw, E1000_RLPML,
			dev->data->dev_conf.rxmode.max_rx_pkt_len +
			VLAN_TAG_SIZE);
	} else
		rctl &= ~E1000_RCTL_LPE;

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		rxq->flags = 0;
		/*
		 * i350 and i354 vlan packets have vlan tags byte swapped.
		 */
		if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
			rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
		} else {
			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
		}

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
			rxq->crc_len = ETHER_CRC_LEN;
		else
			rxq->crc_len = 0;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* Add dual VLAN length to support dual VLAN. */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors are available. */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup BSIZE field of RCTL register, if needed.
	 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
	 * register, since the code above configures the SRRCTL register of
	 * the RX queue in such a case.
	 * All configurable sizes are:
	 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
	 *  8192: rctl |= (E1000_RCTL_SZ_8192  | E1000_RCTL_BSEX);
	 *  4096: rctl |= (E1000_RCTL_SZ_4096  | E1000_RCTL_BSEX);
	 *  2048: rctl |= E1000_RCTL_SZ_2048;
	 *  1024: rctl |= E1000_RCTL_SZ_1024;
	 *   512: rctl |= E1000_RCTL_SZ_512;
	 *   256: rctl |= E1000_RCTL_SZ_256;
	 */
	if (rctl_bsize > 0) {
		if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
			rctl |= E1000_RCTL_SZ_512;
		else /* 256 <= buf_size < 512 - use 256 */
			rctl |= E1000_RCTL_SZ_256;
	}

	/*
	 * Configure RSS if device configured with multiple RX queues.
	 */
	igb_dev_mq_rx_configure(dev);

	/* Update rctl, since igb_dev_mq_rx_configure may change its value. */
	rctl |= E1000_READ_REG(hw, E1000_RCTL);

	/*
	 * Setup the Checksum Register.
	 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
	 */
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	/* Enable both L3/L4 RX checksum offload */
	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
		rxcsum |= E1000_RXCSUM_IPOFL;
	else
		rxcsum &= ~E1000_RXCSUM_IPOFL;
	if (rxmode->offloads &
		(DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
		rxcsum |= E1000_RXCSUM_TUOFL;
	else
		rxcsum &= ~E1000_RXCSUM_TUOFL;
	if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
		rxcsum |= E1000_RXCSUM_CRCOFL;
	else
		rxcsum &= ~E1000_RXCSUM_CRCOFL;

	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* Setup the Receive Control Register. */
	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
		rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */

		/* clear STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr &= ~E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	} else {
		rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */

		/* set STRCRC bit in all queues */
		if (hw->mac.type == e1000_i350 ||
		    hw->mac.type == e1000_i210 ||
		    hw->mac.type == e1000_i211 ||
		    hw->mac.type == e1000_i354) {
			for (i = 0; i < dev->data->nb_rx_queues; i++) {
				rxq = dev->data->rx_queues[i];
				uint32_t dvmolr = E1000_READ_REG(hw,
					E1000_DVMOLR(rxq->reg_idx));
				dvmolr |= E1000_DVMOLR_STRCRC;
				E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
			}
		}
	}

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off. */
	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
		rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets. */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives. */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
	}

	return 0;
}

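/*
 * Illustrative sketch (not part of the driver): the SRRCTL.BSIZEPACKET
 * arithmetic used above. The field is in 1 KB units, so the effective
 * buffer size is buf_size rounded down to a multiple of 1024: a
 * 2048-byte data room yields 2048, while 1500 is truncated to 1024.
 * The function name is an assumption for the example.
 */
static __rte_unused uint16_t
example_effective_rx_buf_size(uint16_t buf_size)
{
	uint32_t srrctl = 0;

	srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
		   E1000_SRRCTL_BSIZEPKT_MASK);
	return (uint16_t)((srrctl & E1000_SRRCTL_BSIZEPKT_MASK) <<
			  E1000_SRRCTL_BSIZEPKT_SHIFT);
}
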
/*********************************************************************
 *
 *  Enable transmit unit.
 *
 **********************************************************************/

void
eth_igb_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t tctl;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;

		E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
		E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
	}

	/* Program the Transmit Control Register. */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	e1000_config_collision_dist(hw);

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}

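/*
 * Illustrative sketch (not part of the driver): where the pthresh/
 * hthresh/wthresh values packed into TXDCTL above come from, namely
 * the application's queue setup. The threshold values, descriptor
 * count and function name are assumptions for the example.
 */
static __rte_unused int
example_tx_queue_setup(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_txconf txconf = {
		.tx_thresh = {
			.pthresh = 8,	/* prefetch threshold */
			.hthresh = 1,	/* host threshold */
			.wthresh = 16,	/* write-back threshold */
		},
	};

	return rte_eth_tx_queue_setup(port_id, queue_id,
			512 /* nb_tx_desc */, rte_socket_id(), &txconf);
}
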
/*********************************************************************
 *
 *  Enable VF receive unit.
 *
 **********************************************************************/

int
eth_igbvf_rx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_rx_queue *rxq;
	uint32_t srrctl;
	uint16_t buf_size;
	uint16_t rctl_bsize;
	uint16_t i;
	int ret;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* setup MTU */
	e1000_rlpml_set_vf(hw,
		(uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len +
		VLAN_TAG_SIZE));

	/* Configure and enable each RX queue. */
	rctl_bsize = 0;
	dev->rx_pkt_burst = eth_igb_recv_pkts;
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		uint64_t bus_addr;
		uint32_t rxdctl;

		rxq = dev->data->rx_queues[i];

		rxq->flags = 0;
		/*
		 * i350VF LB vlan packets have vlan tags byte swapped.
		 */
		if (hw->mac.type == e1000_vfadapt_i350) {
			rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
		} else {
			PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
		}

		/* Allocate buffers for descriptor rings and set up queue */
		ret = igb_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		bus_addr = rxq->rx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_RDLEN(i),
				rxq->nb_rx_desc *
				sizeof(union e1000_adv_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);

		srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

		/*
		 * Configure RX buffer size.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		if (buf_size >= 1024) {
			/*
			 * Configure the BSIZEPACKET field of the SRRCTL
			 * register of the queue.
			 * Value is in 1 KB resolution, from 1 KB to 127 KB.
			 * If this field is equal to 0b, then RCTL.BSIZE
			 * determines the RX packet buffer size.
			 */
			srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
				   E1000_SRRCTL_BSIZEPKT_MASK);
			buf_size = (uint16_t) ((srrctl &
						E1000_SRRCTL_BSIZEPKT_MASK) <<
					       E1000_SRRCTL_BSIZEPKT_SHIFT);

			/* Add dual VLAN length to support dual VLAN. */
			if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
					2 * VLAN_TAG_SIZE) > buf_size) {
				if (!dev->data->scattered_rx)
					PMD_INIT_LOG(DEBUG,
						     "forcing scatter mode");
				dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
				dev->data->scattered_rx = 1;
			}
		} else {
			/*
			 * Use BSIZE field of the device RCTL register.
			 */
			if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
				rctl_bsize = buf_size;
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
			dev->data->scattered_rx = 1;
		}

		/* Set if packets are dropped when no descriptors are available. */
		if (rxq->drop_en)
			srrctl |= E1000_SRRCTL_DROP_EN;

		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);

		/* Enable this RX queue. */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		rxdctl &= 0xFFF00000;
		rxdctl |= (rxq->pthresh & 0x1F);
		rxdctl |= ((rxq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_vfadapt) {
			/*
			 * Workaround for 82576 VF Erratum:
			 * force WTHRESH to 1 to avoid write-back
			 * sometimes not being triggered.
			 */
			rxdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
		} else
			rxdctl |= ((rxq->wthresh & 0x1F) << 16);
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
		if (!dev->data->scattered_rx)
			PMD_INIT_LOG(DEBUG, "forcing scatter mode");
		dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
		dev->data->scattered_rx = 1;
	}

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers.
	 * This needs to be done after enable.
	 */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
	}

	return 0;
}

/*********************************************************************
 *
 *  Enable VF transmit unit.
 *
 **********************************************************************/

void
eth_igbvf_tx_init(struct rte_eth_dev *dev)
{
	struct e1000_hw *hw;
	struct igb_tx_queue *txq;
	uint32_t txdctl;
	uint16_t i;

	hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Setup the Base and Length of the Tx Descriptor Rings. */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		uint64_t bus_addr;

		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
				txq->nb_tx_desc *
				sizeof(union e1000_adv_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
				(uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers. */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		/* Setup Transmit threshold registers. */
		txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
		txdctl |= txq->pthresh & 0x1F;
		txdctl |= ((txq->hthresh & 0x1F) << 8);
		if (hw->mac.type == e1000_82576) {
			/*
			 * Workaround for 82576 VF Erratum:
			 * force WTHRESH to 1 to avoid write-back
			 * sometimes not being triggered.
			 */
			txdctl |= 0x10000;
			PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
		} else
			txdctl |= ((txq->wthresh & 0x1F) << 16);
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}
}

void
igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct igb_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
	qinfo->conf.offloads = rxq->offloads;
}

void
igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct igb_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;
	qinfo->conf.offloads = txq->offloads;
}

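/*
 * Illustrative sketch (not part of the driver): querying the fields
 * filled in by the two helpers above through the public ethdev API.
 * The function name and identifiers are assumptions for the example.
 */
static __rte_unused void
example_dump_queue_info(uint16_t port_id, uint16_t queue_id)
{
	struct rte_eth_rxq_info rx_info;
	struct rte_eth_txq_info tx_info;

	if (rte_eth_rx_queue_info_get(port_id, queue_id, &rx_info) == 0)
		PMD_INIT_LOG(DEBUG, "rxq %u: %u descriptors, scattered=%d",
			     queue_id, rx_info.nb_desc, rx_info.scattered_rx);
	if (rte_eth_tx_queue_info_get(port_id, queue_id, &tx_info) == 0)
		PMD_INIT_LOG(DEBUG, "txq %u: %u descriptors",
			     queue_id, tx_info.nb_desc);
}
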
int
igb_rss_conf_init(struct rte_eth_dev *dev,
		  struct igb_rte_flow_rss_conf *out,
		  const struct rte_flow_action_rss *in)
{
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (in->key_len > RTE_DIM(out->key) ||
	    ((hw->mac.type == e1000_82576) &&
	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM_82576)) ||
	    ((hw->mac.type != e1000_82576) &&
	     (in->queue_num > IGB_MAX_RX_QUEUE_NUM)))
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss){
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		.key = memcpy(out->key, in->key, in->key_len),
		.queue = memcpy(out->queue, in->queue,
				sizeof(*in->queue) * in->queue_num),
	};
	return 0;
}

static int
igb_action_rss_same(const struct rte_flow_action_rss *comp,
		    const struct rte_flow_action_rss *with)
{
	return (comp->func == with->func &&
		comp->level == with->level &&
		comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		!memcmp(comp->key, with->key, with->key_len) &&
		!memcmp(comp->queue, with->queue,
			sizeof(*with->queue) * with->queue_num));
}

int
igb_config_rss_filter(struct rte_eth_dev *dev,
		struct igb_rte_flow_rss_conf *conf, bool add)
{
	uint32_t shift;
	uint16_t i, j;
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = conf->conf.key_len ?
			(void *)(uintptr_t)conf->conf.key : NULL,
		.rss_key_len = conf->conf.key_len,
		.rss_hf = conf->conf.types,
	};
	struct e1000_filter_info *filter_info =
		E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
	struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!add) {
		if (igb_action_rss_same(&filter_info->rss_info.conf,
					&conf->conf)) {
			igb_rss_disable(dev);
			memset(&filter_info->rss_info, 0,
				sizeof(struct igb_rte_flow_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	if (filter_info->rss_info.conf.queue_num)
		return -EINVAL;

	/* Fill in redirection table. */
	shift = (hw->mac.type == e1000_82575) ? 6 : 0;
	for (i = 0, j = 0; i < 128; i++, j++) {
		union e1000_reta {
			uint32_t dword;
			uint8_t  bytes[4];
		} reta;
		uint8_t q_idx;

		if (j == conf->conf.queue_num)
			j = 0;
		q_idx = conf->conf.queue[j];
		reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
		if ((i & 3) == 3)
			E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
	}

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
		igb_rss_disable(dev);
		return 0;
	}
	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = rss_intel_key; /* Default hash key */
	igb_hw_rss_hash_set(hw, &rss_conf);

	if (igb_rss_conf_init(dev, &filter_info->rss_info, &conf->conf))
		return -EINVAL;

	return 0;
}

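/*
 * Illustrative sketch (not part of the driver): the rte_flow RSS action
 * that ultimately reaches igb_config_rss_filter() above. The pattern,
 * queue list, hash types and function name are assumptions for the
 * example.
 */
static __rte_unused struct rte_flow *
example_flow_rss(uint16_t port_id, struct rte_flow_error *error)
{
	static const uint16_t queues[2] = { 0, 1 };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
		.types = ETH_RSS_IPV4,
		.queue_num = 2,
		.queue = queues,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
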