1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
15 #include <rte_interrupts.h>
16 #include <rte_byteorder.h>
17 #include <rte_common.h>
19 #include <rte_debug.h>
21 #include <rte_memory.h>
22 #include <rte_memcpy.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
33 #include <rte_ether.h>
34 #include <rte_ethdev_driver.h>
35 #include <rte_prefetch.h>
40 #include <rte_string_fns.h>
42 #include "e1000_logs.h"
43 #include "base/e1000_api.h"
44 #include "e1000_ethdev.h"
46 #ifdef RTE_LIBRTE_IEEE1588
47 #define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
49 #define IGB_TX_IEEE1588_TMST 0
51 /* Bit Mask to indicate what bits are required for building a TX context */
52 #define IGB_TX_OFFLOAD_MASK ( \
59 #define IGB_TX_OFFLOAD_NOTSUP_MASK \
60 (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
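/*
 * Illustration (assumption: the elided IGB_TX_OFFLOAD_MASK definition above
 * does not list PKT_TX_MACSEC): the XOR leaves exactly the generic ethdev TX
 * offload flags that igb cannot handle, so an mbuf carrying PKT_TX_MACSEC is
 * rejected by eth_igb_prep_pkts() with rte_errno set to ENOTSUP.
 */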
63 * Structure associated with each descriptor of the RX ring of an RX queue.
66 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
70 * Structure associated with each descriptor of the TX ring of a TX queue.
73 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
74 uint16_t next_id; /**< Index of next descriptor in ring. */
75 uint16_t last_id; /**< Index of last scattered descriptor. */
82 IGB_RXQ_FLAG_LB_BSWAP_VLAN = 0x01,
86 * Structure associated with each RX queue.
89 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
90 volatile union e1000_adv_rx_desc *rx_ring; /**< RX ring virtual address. */
91 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
92 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
93 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
94 struct igb_rx_entry *sw_ring; /**< address of RX software ring. */
95 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
96 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
97 uint16_t nb_rx_desc; /**< number of RX descriptors. */
98 uint16_t rx_tail; /**< current value of RDT register. */
99 uint16_t nb_rx_hold; /**< number of held free RX desc. */
100 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
101 uint16_t queue_id; /**< RX queue index. */
102 uint16_t reg_idx; /**< RX queue register index. */
103 uint16_t port_id; /**< Device port identifier. */
104 uint8_t pthresh; /**< Prefetch threshold register. */
105 uint8_t hthresh; /**< Host threshold register. */
106 uint8_t wthresh; /**< Write-back threshold register. */
107 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
108 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
109 uint32_t flags; /**< RX flags. */
110 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
114 * Hardware context number
116 enum igb_advctx_num {
117 IGB_CTX_0 = 0, /**< CTX0 */
118 IGB_CTX_1 = 1, /**< CTX1 */
119 IGB_CTX_NUM = 2, /**< CTX_NUM */
122 /** Offload features */
123 union igb_tx_offload {
126 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
127 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
128 uint64_t vlan_tci:16; /**< VLAN Tag Control Identifier (CPU order). */
129 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
130 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
132 /* uint64_t unused:8; */
137 * Compare mask for igb_tx_offload.data,
138 * should be in sync with igb_tx_offload layout.
140 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
141 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
142 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
143 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
144 /** Mac + IP + TCP + Mss mask. */
145 #define TX_TSO_CMP_MASK \
146 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
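/*
 * Worked example of how these masks line up with the igb_tx_offload bit
 * layout above: l3_len (9 bits) plus l2_len (7 bits) occupy bits 0..15,
 * i.e. TX_MACIP_LEN_CMP_MASK; vlan_tci occupies bits 16..31
 * (TX_VLAN_CMP_MASK); l4_len bits 32..39 (TX_TCP_LEN_CMP_MASK); and
 * tso_segsz bits 40..55 (TX_TSO_MSS_CMP_MASK).
 */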
149 * Structure used to check whether a new context descriptor needs to be built
151 struct igb_advctx_info {
152 uint64_t flags; /**< ol_flags related to context build. */
153 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
154 union igb_tx_offload tx_offload;
155 /** compare mask for tx offload. */
156 union igb_tx_offload tx_offload_mask;
160 * Structure associated with each TX queue.
162 struct igb_tx_queue {
163 volatile union e1000_adv_tx_desc *tx_ring; /**< TX ring address */
164 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
165 struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
166 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
167 uint32_t txd_type; /**< Device-specific TXD type */
168 uint16_t nb_tx_desc; /**< number of TX descriptors. */
169 uint16_t tx_tail; /**< Current value of TDT register. */
171 /**< Index of first used TX descriptor. */
172 uint16_t queue_id; /**< TX queue index. */
173 uint16_t reg_idx; /**< TX queue register index. */
174 uint16_t port_id; /**< Device port identifier. */
175 uint8_t pthresh; /**< Prefetch threshold register. */
176 uint8_t hthresh; /**< Host threshold register. */
177 uint8_t wthresh; /**< Write-back threshold register. */
179 /**< Current used hardware descriptor. */
181 /**< Start context position for transmit queue. */
182 struct igb_advctx_info ctx_cache[IGB_CTX_NUM];
183 /**< Hardware context history.*/
184 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
188 #define RTE_PMD_USE_PREFETCH
191 #ifdef RTE_PMD_USE_PREFETCH
192 #define rte_igb_prefetch(p) rte_prefetch0(p)
194 #define rte_igb_prefetch(p) do {} while(0)
197 #ifdef RTE_PMD_PACKET_PREFETCH
198 #define rte_packet_prefetch(p) rte_prefetch1(p)
200 #define rte_packet_prefetch(p) do {} while(0)
204 * Macro for VMDq feature for 1 GbE NIC.
206 #define E1000_VMOLR_SIZE (8)
207 #define IGB_TSO_MAX_HDRLEN (512)
208 #define IGB_TSO_MAX_MSS (9216)
210 /*********************************************************************
214 **********************************************************************/
217 * There are some hardware limitations for TCP segmentation offload. We
218 * should check whether the parameters are valid.
220 static inline uint64_t
221 check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
223 if (!(ol_req & PKT_TX_TCP_SEG))
225 if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
226 ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
227 ol_req &= ~PKT_TX_TCP_SEG;
228 ol_req |= PKT_TX_TCP_CKSUM;
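/*
 * Worked example: a request with tso_segsz = 16384 (> IGB_TSO_MAX_MSS, 9216),
 * or with l2_len + l3_len + l4_len = 600 (> IGB_TSO_MAX_HDRLEN, 512), is
 * downgraded here from TSO to a plain TCP checksum offload.
 */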
234 * Advanced context descriptors are almost the same between igb and ixgbe.
235 * This is kept as a separate function; there may be an optimization opportunity here.
236 * Rework is required to go with the pre-defined values.
240 igbe_set_xmit_ctx(struct igb_tx_queue* txq,
241 volatile struct e1000_adv_tx_context_desc *ctx_txd,
242 uint64_t ol_flags, union igb_tx_offload tx_offload)
244 uint32_t type_tucmd_mlhl;
245 uint32_t mss_l4len_idx;
246 uint32_t ctx_idx, ctx_curr;
247 uint32_t vlan_macip_lens;
248 union igb_tx_offload tx_offload_mask;
250 ctx_curr = txq->ctx_curr;
251 ctx_idx = ctx_curr + txq->ctx_start;
253 tx_offload_mask.data = 0;
256 /* Specify which HW CTX to upload. */
257 mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
259 if (ol_flags & PKT_TX_VLAN_PKT)
260 tx_offload_mask.data |= TX_VLAN_CMP_MASK;
262 /* check if TCP segmentation is required for this packet */
263 if (ol_flags & PKT_TX_TCP_SEG) {
264 /* implies IP cksum in IPv4 */
265 if (ol_flags & PKT_TX_IP_CKSUM)
266 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
267 E1000_ADVTXD_TUCMD_L4T_TCP |
268 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
270 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV6 |
271 E1000_ADVTXD_TUCMD_L4T_TCP |
272 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
274 tx_offload_mask.data |= TX_TSO_CMP_MASK;
275 mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
276 mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
277 } else { /* no TSO, check if hardware checksum is needed */
278 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
279 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
281 if (ol_flags & PKT_TX_IP_CKSUM)
282 type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
284 switch (ol_flags & PKT_TX_L4_MASK) {
285 case PKT_TX_UDP_CKSUM:
286 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
287 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
288 mss_l4len_idx |= sizeof(struct udp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
290 case PKT_TX_TCP_CKSUM:
291 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
292 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
293 mss_l4len_idx |= sizeof(struct tcp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
295 case PKT_TX_SCTP_CKSUM:
296 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
297 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
298 mss_l4len_idx |= sizeof(struct sctp_hdr) << E1000_ADVTXD_L4LEN_SHIFT;
301 type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_RSV |
302 E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
307 txq->ctx_cache[ctx_curr].flags = ol_flags;
308 txq->ctx_cache[ctx_curr].tx_offload.data =
309 tx_offload_mask.data & tx_offload.data;
310 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
312 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
313 vlan_macip_lens = (uint32_t)tx_offload.data;
314 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
315 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
316 ctx_txd->seqnum_seed = 0;
320 * Check which hardware context can be used. Use the existing match
321 * or create a new context descriptor.
323 static inline uint32_t
324 what_advctx_update(struct igb_tx_queue *txq, uint64_t flags,
325 union igb_tx_offload tx_offload)
327 /* If it matches the current context */
328 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
329 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
330 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
331 return txq->ctx_curr;
334 /* If it matches the second context */
336 if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) &&
337 (txq->ctx_cache[txq->ctx_curr].tx_offload.data ==
338 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data & tx_offload.data)))) {
339 return txq->ctx_curr;
342 /* Mismatch: neither cached context matches, a new one must be built */
346 static inline uint32_t
347 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
349 static const uint32_t l4_olinfo[2] = {0, E1000_ADVTXD_POPTS_TXSM};
350 static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
353 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
354 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
355 tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
359 static inline uint32_t
360 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
363 static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
364 static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
365 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
366 cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
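/*
 * Example for the two lookup tables above: ol_flags carrying
 * PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM yields
 * E1000_ADVTXD_POPTS_IXSM | E1000_ADVTXD_POPTS_TXSM from
 * tx_desc_cksum_flags_to_olinfo(); adding PKT_TX_VLAN_PKT contributes
 * E1000_ADVTXD_DCMD_VLE and PKT_TX_TCP_SEG contributes E1000_ADVTXD_DCMD_TSE
 * to the cmdtype.
 */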
371 eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
374 struct igb_tx_queue *txq;
375 struct igb_tx_entry *sw_ring;
376 struct igb_tx_entry *txe, *txn;
377 volatile union e1000_adv_tx_desc *txr;
378 volatile union e1000_adv_tx_desc *txd;
379 struct rte_mbuf *tx_pkt;
380 struct rte_mbuf *m_seg;
381 uint64_t buf_dma_addr;
382 uint32_t olinfo_status;
383 uint32_t cmd_type_len;
392 uint32_t new_ctx = 0;
394 union igb_tx_offload tx_offload = {0};
397 sw_ring = txq->sw_ring;
399 tx_id = txq->tx_tail;
400 txe = &sw_ring[tx_id];
402 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
404 pkt_len = tx_pkt->pkt_len;
406 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
409 * The number of descriptors that must be allocated for a
410 * packet is the number of segments of that packet, plus 1 Context
411 * Descriptor, if any hardware offload (VLAN tag, checksum, TSO) requires one.
412 * Determine the last TX descriptor to allocate in the TX ring
413 * for the packet, starting from the current position (tx_id)
416 tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
418 ol_flags = tx_pkt->ol_flags;
419 tx_ol_req = ol_flags & IGB_TX_OFFLOAD_MASK;
421 /* If a Context Descriptor needs to be built. */
423 tx_offload.l2_len = tx_pkt->l2_len;
424 tx_offload.l3_len = tx_pkt->l3_len;
425 tx_offload.l4_len = tx_pkt->l4_len;
426 tx_offload.vlan_tci = tx_pkt->vlan_tci;
427 tx_offload.tso_segsz = tx_pkt->tso_segsz;
428 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
430 ctx = what_advctx_update(txq, tx_ol_req, tx_offload);
431 /* Only allocate a context descriptor if required. */
432 new_ctx = (ctx == IGB_CTX_NUM);
433 ctx = txq->ctx_curr + txq->ctx_start;
434 tx_last = (uint16_t) (tx_last + new_ctx);
436 if (tx_last >= txq->nb_tx_desc)
437 tx_last = (uint16_t) (tx_last - txq->nb_tx_desc);
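/*
 * Worked example of the index arithmetic above: on a 512-descriptor ring
 * with tx_id = 510, a 3-segment packet that also needs a context
 * descriptor gives tx_last = 510 + 3 - 1 + 1 = 513, which wraps to 1.
 */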
439 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
440 " tx_first=%u tx_last=%u",
441 (unsigned) txq->port_id,
442 (unsigned) txq->queue_id,
448 * Check if there are enough free descriptors in the TX ring
449 * to transmit the next packet.
450 * This operation is based on the two following rules:
452 * 1- Only check that the last needed TX descriptor can be
453 * allocated (by construction, if that descriptor is free,
454 * all intermediate ones are also free).
456 * For this purpose, the index of the last TX descriptor
457 * used for a packet (the "last descriptor" of a packet)
458 * is recorded in the TX entries (the last one included)
459 * that are associated with all TX descriptors allocated
462 * 2- Avoid allocating the last free TX descriptor of the
463 * ring, in order to never set the TDT register with the
464 * same value stored in parallel by the NIC in the TDH
465 * register, which would make the TX engine of the NIC enter
466 * a deadlock situation.
468 * By extension, avoid allocating a free descriptor that
469 * belongs to the last set of free descriptors allocated
470 * to the same packet previously transmitted.
474 * The "last descriptor" of the packet that previously used the
475 * descriptor slot we now want to allocate as our last one, if any.
477 tx_end = sw_ring[tx_last].last_id;
480 * The next descriptor following that "last descriptor" in the ring.
483 tx_end = sw_ring[tx_end].next_id;
486 * The "last descriptor" associated with that next descriptor.
488 tx_end = sw_ring[tx_end].last_id;
491 * Check that this descriptor is free.
493 if (! (txr[tx_end].wb.status & E1000_TXD_STAT_DD)) {
500 * Set common flags of all TX Data Descriptors.
502 * The following bits must be set in all Data Descriptors:
503 * - E1000_ADVTXD_DTYP_DATA
504 * - E1000_ADVTXD_DCMD_DEXT
506 * The following bits must be set in the first Data Descriptor
507 * and are ignored in the other ones:
508 * - E1000_ADVTXD_DCMD_IFCS
509 * - E1000_ADVTXD_MAC_1588
510 * - E1000_ADVTXD_DCMD_VLE
512 * The following bits must only be set in the last Data
514 * - E1000_TXD_CMD_EOP
516 * The following bits can be set in any Data Descriptor, but
517 * are only set in the last Data Descriptor:
520 cmd_type_len = txq->txd_type |
521 E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
522 if (tx_ol_req & PKT_TX_TCP_SEG)
523 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
524 olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
525 #if defined(RTE_LIBRTE_IEEE1588)
526 if (ol_flags & PKT_TX_IEEE1588_TMST)
527 cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
530 /* Setup TX Advanced context descriptor if required */
532 volatile struct e1000_adv_tx_context_desc *
535 ctx_txd = (volatile struct
536 e1000_adv_tx_context_desc *)
539 txn = &sw_ring[txe->next_id];
540 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
542 if (txe->mbuf != NULL) {
543 rte_pktmbuf_free_seg(txe->mbuf);
547 igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req, tx_offload);
549 txe->last_id = tx_last;
550 tx_id = txe->next_id;
554 /* Setup the TX Advanced Data Descriptor */
555 cmd_type_len |= tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
556 olinfo_status |= tx_desc_cksum_flags_to_olinfo(tx_ol_req);
557 olinfo_status |= (ctx << E1000_ADVTXD_IDX_SHIFT);
562 txn = &sw_ring[txe->next_id];
565 if (txe->mbuf != NULL)
566 rte_pktmbuf_free_seg(txe->mbuf);
570 * Set up transmit descriptor.
572 slen = (uint16_t) m_seg->data_len;
573 buf_dma_addr = rte_mbuf_data_iova(m_seg);
574 txd->read.buffer_addr =
575 rte_cpu_to_le_64(buf_dma_addr);
576 txd->read.cmd_type_len =
577 rte_cpu_to_le_32(cmd_type_len | slen);
578 txd->read.olinfo_status =
579 rte_cpu_to_le_32(olinfo_status);
580 txe->last_id = tx_last;
581 tx_id = txe->next_id;
584 } while (m_seg != NULL);
587 * The last packet data descriptor needs End Of Packet (EOP)
588 * and Report Status (RS).
590 txd->read.cmd_type_len |=
591 rte_cpu_to_le_32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS);
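/*
 * Note: the RS bit requests that the hardware write the DD bit back into
 * the descriptor status; both the free-descriptor check earlier in this
 * function and igb_tx_done_cleanup() rely on that DD write-back to know
 * when a slot can be reused.
 */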
597 * Set the Transmit Descriptor Tail (TDT).
599 E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
600 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
601 (unsigned) txq->port_id, (unsigned) txq->queue_id,
602 (unsigned) tx_id, (unsigned) nb_tx);
603 txq->tx_tail = tx_id;
608 /*********************************************************************
612 **********************************************************************/
614 eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
620 for (i = 0; i < nb_pkts; i++) {
623 /* Check some limitations for TSO in hardware */
624 if (m->ol_flags & PKT_TX_TCP_SEG)
625 if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
626 (m->l2_len + m->l3_len + m->l4_len >
627 IGB_TSO_MAX_HDRLEN)) {
632 if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
633 rte_errno = ENOTSUP;
637 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
638 ret = rte_validate_tx_offload(m);
644 ret = rte_net_intel_cksum_prepare(m);
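/*
 * Usage sketch (application side, not part of this driver): the prepare
 * handler is reached through rte_eth_tx_prepare(), typically called right
 * before rte_eth_tx_burst() when TSO or checksum offloads are requested:
 *
 *	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	// on failure, pkts[nb_prep] is the offender and rte_errno says why
 *	uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 */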
654 /*********************************************************************
658 **********************************************************************/
659 #define IGB_PACKET_TYPE_IPV4 0X01
660 #define IGB_PACKET_TYPE_IPV4_TCP 0X11
661 #define IGB_PACKET_TYPE_IPV4_UDP 0X21
662 #define IGB_PACKET_TYPE_IPV4_SCTP 0X41
663 #define IGB_PACKET_TYPE_IPV4_EXT 0X03
664 #define IGB_PACKET_TYPE_IPV4_EXT_SCTP 0X43
665 #define IGB_PACKET_TYPE_IPV6 0X04
666 #define IGB_PACKET_TYPE_IPV6_TCP 0X14
667 #define IGB_PACKET_TYPE_IPV6_UDP 0X24
668 #define IGB_PACKET_TYPE_IPV6_EXT 0X0C
669 #define IGB_PACKET_TYPE_IPV6_EXT_TCP 0X1C
670 #define IGB_PACKET_TYPE_IPV6_EXT_UDP 0X2C
671 #define IGB_PACKET_TYPE_IPV4_IPV6 0X05
672 #define IGB_PACKET_TYPE_IPV4_IPV6_TCP 0X15
673 #define IGB_PACKET_TYPE_IPV4_IPV6_UDP 0X25
674 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
675 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
676 #define IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
677 #define IGB_PACKET_TYPE_MAX 0X80
678 #define IGB_PACKET_TYPE_MASK 0X7F
679 #define IGB_PACKET_TYPE_SHIFT 0X04
680 static inline uint32_t
681 igb_rxd_pkt_info_to_pkt_type(uint16_t pkt_info)
683 static const uint32_t
684 ptype_table[IGB_PACKET_TYPE_MAX] __rte_cache_aligned = {
685 [IGB_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
687 [IGB_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
688 RTE_PTYPE_L3_IPV4_EXT,
689 [IGB_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
691 [IGB_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
692 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
693 RTE_PTYPE_INNER_L3_IPV6,
694 [IGB_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
695 RTE_PTYPE_L3_IPV6_EXT,
696 [IGB_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
697 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
698 RTE_PTYPE_INNER_L3_IPV6_EXT,
699 [IGB_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
700 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
701 [IGB_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
702 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
703 [IGB_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
704 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
705 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
706 [IGB_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
707 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
708 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
709 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
710 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
711 [IGB_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
712 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
713 [IGB_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
714 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
715 [IGB_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
716 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
717 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
718 [IGB_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
719 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
720 [IGB_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
721 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
722 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
723 [IGB_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
724 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
725 [IGB_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
726 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
728 if (unlikely(pkt_info & E1000_RXDADV_PKTTYPE_ETQF))
729 return RTE_PTYPE_UNKNOWN;
731 pkt_info = (pkt_info >> IGB_PACKET_TYPE_SHIFT) & IGB_PACKET_TYPE_MASK;
733 return ptype_table[pkt_info];
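/*
 * Worked example: for a plain IPv4/TCP frame, the descriptor pkt_info field
 * shifted right by 4 and masked with 0x7F gives index 0x11
 * (IGB_PACKET_TYPE_IPV4_TCP), so the mbuf packet_type becomes
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP.
 */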
736 static inline uint64_t
737 rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
739 uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
741 #if defined(RTE_LIBRTE_IEEE1588)
742 static uint32_t ip_pkt_etqf_map[8] = {
743 0, 0, 0, PKT_RX_IEEE1588_PTP,
747 struct rte_eth_dev dev = rte_eth_devices[rxq->port_id];
748 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev.data->dev_private);
750 /* EtherType is in bits 8:10 in Packet Type, and not in the default 0:2 */
751 if (hw->mac.type == e1000_i210)
752 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 12) & 0x07];
754 pkt_flags |= ip_pkt_etqf_map[(hl_tp_rs >> 4) & 0x07];
762 static inline uint64_t
763 rx_desc_status_to_pkt_flags(uint32_t rx_status)
767 /* Check if VLAN present */
768 pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
769 PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
771 #if defined(RTE_LIBRTE_IEEE1588)
772 if (rx_status & E1000_RXD_STAT_TMST)
773 pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
778 static inline uint64_t
779 rx_desc_error_to_pkt_flags(uint32_t rx_status)
782 * Bit 30: IPE, IPv4 checksum error
783 * Bit 29: L4I, L4 integrity error
786 static uint64_t error_to_pkt_flags_map[4] = {
787 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
788 PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
789 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
790 PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
792 return error_to_pkt_flags_map[(rx_status >>
793 E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
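/*
 * Worked example: with neither IPE nor L4I set, the 2-bit index is 0 and
 * PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD is reported; with IPE set and
 * L4I clear, the index is 2, giving PKT_RX_IP_CKSUM_BAD |
 * PKT_RX_L4_CKSUM_GOOD.
 */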
797 eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
800 struct igb_rx_queue *rxq;
801 volatile union e1000_adv_rx_desc *rx_ring;
802 volatile union e1000_adv_rx_desc *rxdp;
803 struct igb_rx_entry *sw_ring;
804 struct igb_rx_entry *rxe;
805 struct rte_mbuf *rxm;
806 struct rte_mbuf *nmb;
807 union e1000_adv_rx_desc rxd;
810 uint32_t hlen_type_rss;
820 rx_id = rxq->rx_tail;
821 rx_ring = rxq->rx_ring;
822 sw_ring = rxq->sw_ring;
823 while (nb_rx < nb_pkts) {
825 * The order of operations here is important as the DD status
826 * bit must not be read after any other descriptor fields.
827 * rx_ring and rxdp are pointing to volatile data so the order
828 * of accesses cannot be reordered by the compiler. If they were
829 * not volatile, they could be reordered which could lead to
830 * using invalid descriptor fields when read from rxd.
832 rxdp = &rx_ring[rx_id];
833 staterr = rxdp->wb.upper.status_error;
834 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
841 * If the E1000_RXD_STAT_EOP flag is not set, the RX packet is
842 * likely to be invalid and to be dropped by the various
843 * validation checks performed by the network stack.
845 * Allocate a new mbuf to replenish the RX ring descriptor.
846 * If the allocation fails:
847 * - arrange for that RX descriptor to be the first one
848 * being parsed the next time the receive function is
849 * invoked [on the same queue].
851 * - Stop parsing the RX ring and return immediately.
853 * This policy does not drop the packet received in the RX
854 * descriptor for which the allocation of a new mbuf failed.
855 * Thus, it allows that packet to be later retrieved if
856 * mbufs have been freed in the meantime.
857 * As a side effect, holding RX descriptors instead of
858 * systematically giving them back to the NIC may lead to
859 * RX ring exhaustion situations.
860 * However, the NIC can gracefully prevent such situations
861 * from happening by sending specific "back-pressure" flow control
862 * frames to its peer(s).
864 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
865 "staterr=0x%x pkt_len=%u",
866 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
867 (unsigned) rx_id, (unsigned) staterr,
868 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
870 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
872 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
873 "queue_id=%u", (unsigned) rxq->port_id,
874 (unsigned) rxq->queue_id);
875 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
880 rxe = &sw_ring[rx_id];
882 if (rx_id == rxq->nb_rx_desc)
885 /* Prefetch next mbuf while processing current one. */
886 rte_igb_prefetch(sw_ring[rx_id].mbuf);
889 * When the next RX descriptor is on a cache-line boundary,
890 * prefetch the next 4 RX descriptors and the next 8 mbuf pointers.
893 if ((rx_id & 0x3) == 0) {
894 rte_igb_prefetch(&rx_ring[rx_id]);
895 rte_igb_prefetch(&sw_ring[rx_id]);
901 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
902 rxdp->read.hdr_addr = 0;
903 rxdp->read.pkt_addr = dma_addr;
906 * Initialize the returned mbuf.
907 * 1) setup generic mbuf fields:
908 * - number of segments,
911 * - RX port identifier.
912 * 2) integrate hardware offload data, if any:
914 * - IP checksum flag,
915 * - VLAN TCI, if any,
918 pkt_len = (uint16_t) (rte_le_to_cpu_16(rxd.wb.upper.length) -
920 rxm->data_off = RTE_PKTMBUF_HEADROOM;
921 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
924 rxm->pkt_len = pkt_len;
925 rxm->data_len = pkt_len;
926 rxm->port = rxq->port_id;
928 rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
929 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
932 * The vlan_tci field is only valid when PKT_RX_VLAN is
933 * set in the pkt_flags field and must be in CPU byte order.
935 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
936 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
937 rxm->vlan_tci = rte_be_to_cpu_16(rxd.wb.upper.vlan);
939 rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
941 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
942 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
943 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
944 rxm->ol_flags = pkt_flags;
945 rxm->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.lower.
946 lo_dword.hs_rss.pkt_info);
949 * Store the mbuf address into the next entry of the array
950 * of returned packets.
952 rx_pkts[nb_rx++] = rxm;
954 rxq->rx_tail = rx_id;
957 * If the number of free RX descriptors is greater than the RX free
958 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
960 * Update the RDT with the value of the last processed RX descriptor
961 * minus 1, to guarantee that the RDT register is never equal to the
962 * RDH register, which creates a "full" ring situation from the
963 * hardware point of view...
965 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
966 if (nb_hold > rxq->rx_free_thresh) {
967 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
968 "nb_hold=%u nb_rx=%u",
969 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
970 (unsigned) rx_id, (unsigned) nb_hold,
972 rx_id = (uint16_t) ((rx_id == 0) ?
973 (rxq->nb_rx_desc - 1) : (rx_id - 1));
974 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
977 rxq->nb_rx_hold = nb_hold;
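/*
 * Worked example: with rx_free_thresh = 32, once more than 32 processed
 * descriptors are being held back, the RDT is rewritten with rx_id - 1
 * (wrapping to nb_rx_desc - 1 when rx_id is 0), so the tail written here
 * can never catch up with the head maintained by the NIC.
 */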
982 eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
985 struct igb_rx_queue *rxq;
986 volatile union e1000_adv_rx_desc *rx_ring;
987 volatile union e1000_adv_rx_desc *rxdp;
988 struct igb_rx_entry *sw_ring;
989 struct igb_rx_entry *rxe;
990 struct rte_mbuf *first_seg;
991 struct rte_mbuf *last_seg;
992 struct rte_mbuf *rxm;
993 struct rte_mbuf *nmb;
994 union e1000_adv_rx_desc rxd;
995 uint64_t dma; /* Physical address of mbuf data buffer */
997 uint32_t hlen_type_rss;
1007 rx_id = rxq->rx_tail;
1008 rx_ring = rxq->rx_ring;
1009 sw_ring = rxq->sw_ring;
1012 * Retrieve RX context of current packet, if any.
1014 first_seg = rxq->pkt_first_seg;
1015 last_seg = rxq->pkt_last_seg;
1017 while (nb_rx < nb_pkts) {
1020 * The order of operations here is important as the DD status
1021 * bit must not be read after any other descriptor fields.
1022 * rx_ring and rxdp are pointing to volatile data so the order
1023 * of accesses cannot be reordered by the compiler. If they were
1024 * not volatile, they could be reordered which could lead to
1025 * using invalid descriptor fields when read from rxd.
1027 rxdp = &rx_ring[rx_id];
1028 staterr = rxdp->wb.upper.status_error;
1029 if (! (staterr & rte_cpu_to_le_32(E1000_RXD_STAT_DD)))
1036 * Allocate a new mbuf to replenish the RX ring descriptor.
1037 * If the allocation fails:
1038 * - arrange for that RX descriptor to be the first one
1039 * being parsed the next time the receive function is
1040 * invoked [on the same queue].
1042 * - Stop parsing the RX ring and return immediately.
1044 * This policy does not drop the packet received in the RX
1045 * descriptor for which the allocation of a new mbuf failed.
1046 * Thus, it allows that packet to be later retrieved if
1047 * mbufs have been freed in the meantime.
1048 * As a side effect, holding RX descriptors instead of
1049 * systematically giving them back to the NIC may lead to
1050 * RX ring exhaustion situations.
1051 * However, the NIC can gracefully prevent such situations
1052 * from happening by sending specific "back-pressure" flow control
1053 * frames to its peer(s).
1055 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1056 "staterr=0x%x data_len=%u",
1057 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1058 (unsigned) rx_id, (unsigned) staterr,
1059 (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
1061 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1063 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1064 "queue_id=%u", (unsigned) rxq->port_id,
1065 (unsigned) rxq->queue_id);
1066 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1071 rxe = &sw_ring[rx_id];
1073 if (rx_id == rxq->nb_rx_desc)
1076 /* Prefetch next mbuf while processing current one. */
1077 rte_igb_prefetch(sw_ring[rx_id].mbuf);
1080 * When the next RX descriptor is on a cache-line boundary,
1081 * prefetch the next 4 RX descriptors and the next 8 mbuf pointers.
1084 if ((rx_id & 0x3) == 0) {
1085 rte_igb_prefetch(&rx_ring[rx_id]);
1086 rte_igb_prefetch(&sw_ring[rx_id]);
1090 * Update the RX descriptor with the physical address of the new
1091 * data buffer of the newly allocated mbuf.
1095 dma = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1096 rxdp->read.pkt_addr = dma;
1097 rxdp->read.hdr_addr = 0;
1100 * Set data length & data buffer address of mbuf.
1102 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
1103 rxm->data_len = data_len;
1104 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1107 * If this is the first buffer of the received packet,
1108 * set the pointer to the first mbuf of the packet and
1109 * initialize its context.
1110 * Otherwise, update the total length and the number of segments
1111 * of the current scattered packet, and update the pointer to
1112 * the last mbuf of the current packet.
1114 if (first_seg == NULL) {
1116 first_seg->pkt_len = data_len;
1117 first_seg->nb_segs = 1;
1119 first_seg->pkt_len += data_len;
1120 first_seg->nb_segs++;
1121 last_seg->next = rxm;
1125 * If this is not the last buffer of the received packet,
1126 * update the pointer to the last mbuf of the current scattered
1127 * packet and continue to parse the RX ring.
1129 if (! (staterr & E1000_RXD_STAT_EOP)) {
1135 * This is the last buffer of the received packet.
1136 * If the CRC is not stripped by the hardware:
1137 * - Subtract the CRC length from the total packet length.
1138 * - If the last buffer only contains the whole CRC or a part
1139 * of it, free the mbuf associated to the last buffer.
1140 * If part of the CRC is also contained in the previous
1141 * mbuf, subtract the length of that CRC part from the
1142 * data length of the previous mbuf.
1145 if (unlikely(rxq->crc_len > 0)) {
1146 first_seg->pkt_len -= ETHER_CRC_LEN;
1147 if (data_len <= ETHER_CRC_LEN) {
1148 rte_pktmbuf_free_seg(rxm);
1149 first_seg->nb_segs--;
1150 last_seg->data_len = (uint16_t)
1151 (last_seg->data_len -
1152 (ETHER_CRC_LEN - data_len));
1153 last_seg->next = NULL;
1156 (uint16_t) (data_len - ETHER_CRC_LEN);
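/*
 * Worked example: if the 4-byte CRC is split so that the last segment holds
 * only 2 of its bytes, that segment is freed, nb_segs is decremented, and
 * the remaining ETHER_CRC_LEN - 2 = 2 bytes are trimmed from the previous
 * segment; otherwise the whole CRC is simply removed from the last
 * segment's data_len.
 */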
1160 * Initialize the first mbuf of the returned packet:
1161 * - RX port identifier,
1162 * - hardware offload data, if any:
1163 * - RSS flag & hash,
1164 * - IP checksum flag,
1165 * - VLAN TCI, if any,
1168 first_seg->port = rxq->port_id;
1169 first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
1172 * The vlan_tci field is only valid when PKT_RX_VLAN is
1173 * set in the pkt_flags field and must be in CPU byte order.
1175 if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
1176 (rxq->flags & IGB_RXQ_FLAG_LB_BSWAP_VLAN)) {
1177 first_seg->vlan_tci =
1178 rte_be_to_cpu_16(rxd.wb.upper.vlan);
1180 first_seg->vlan_tci =
1181 rte_le_to_cpu_16(rxd.wb.upper.vlan);
1183 hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
1184 pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(rxq, hlen_type_rss);
1185 pkt_flags = pkt_flags | rx_desc_status_to_pkt_flags(staterr);
1186 pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr);
1187 first_seg->ol_flags = pkt_flags;
1188 first_seg->packet_type = igb_rxd_pkt_info_to_pkt_type(rxd.wb.
1189 lower.lo_dword.hs_rss.pkt_info);
1191 /* Prefetch data of first segment, if configured to do so. */
1192 rte_packet_prefetch((char *)first_seg->buf_addr +
1193 first_seg->data_off);
1196 * Store the mbuf address into the next entry of the array
1197 * of returned packets.
1199 rx_pkts[nb_rx++] = first_seg;
1202 * Setup receipt context for a new packet.
1208 * Record index of the next RX descriptor to probe.
1210 rxq->rx_tail = rx_id;
1213 * Save receive context.
1215 rxq->pkt_first_seg = first_seg;
1216 rxq->pkt_last_seg = last_seg;
1219 * If the number of free RX descriptors is greater than the RX free
1220 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1222 * Update the RDT with the value of the last processed RX descriptor
1223 * minus 1, to guarantee that the RDT register is never equal to the
1224 * RDH register, which creates a "full" ring situation from the
1225 * hardware point of view...
1227 nb_hold = (uint16_t) (nb_hold + rxq->nb_rx_hold);
1228 if (nb_hold > rxq->rx_free_thresh) {
1229 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1230 "nb_hold=%u nb_rx=%u",
1231 (unsigned) rxq->port_id, (unsigned) rxq->queue_id,
1232 (unsigned) rx_id, (unsigned) nb_hold,
1234 rx_id = (uint16_t) ((rx_id == 0) ?
1235 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1236 E1000_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
1239 rxq->nb_rx_hold = nb_hold;
1244 * Maximum number of Ring Descriptors.
1246 * Since RDLEN/TDLEN should be a multiple of 128 bytes, the number of ring
1247 * descriptors should meet the following condition:
1248 * (num_ring_desc * sizeof(struct e1000_rx/tx_desc)) % 128 == 0
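 *
 * For example, advanced RX/TX descriptors are 16 bytes each, so the ring
 * size must be a multiple of 128 / 16 = 8 descriptors, which is what the
 * nb_desc % IGB_RXD_ALIGN / IGB_TXD_ALIGN checks in the queue setup
 * functions below enforce.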
1252 igb_tx_queue_release_mbufs(struct igb_tx_queue *txq)
1256 if (txq->sw_ring != NULL) {
1257 for (i = 0; i < txq->nb_tx_desc; i++) {
1258 if (txq->sw_ring[i].mbuf != NULL) {
1259 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1260 txq->sw_ring[i].mbuf = NULL;
1267 igb_tx_queue_release(struct igb_tx_queue *txq)
1270 igb_tx_queue_release_mbufs(txq);
1271 rte_free(txq->sw_ring);
1277 eth_igb_tx_queue_release(void *txq)
1279 igb_tx_queue_release(txq);
1283 igb_tx_done_cleanup(struct igb_tx_queue *txq, uint32_t free_cnt)
1285 struct igb_tx_entry *sw_ring;
1286 volatile union e1000_adv_tx_desc *txr;
1287 uint16_t tx_first; /* First segment analyzed. */
1288 uint16_t tx_id; /* Current segment being processed. */
1289 uint16_t tx_last; /* Last segment in the current packet. */
1290 uint16_t tx_next; /* First segment of the next packet. */
1295 sw_ring = txq->sw_ring;
1299 * tx_tail is the last sent packet on the sw_ring. Go to the end
1300 * of that packet (the last segment in the packet chain) and
1301 * then the next segment will be the start of the oldest segment
1302 * in the sw_ring. This is the first packet that we will attempt
1303 * to free.
1306 /* Get last segment in most recently added packet. */
1307 tx_first = sw_ring[txq->tx_tail].last_id;
1309 /* Get the next segment, which is the oldest segment in ring. */
1310 tx_first = sw_ring[tx_first].next_id;
1312 /* Set the current index to the first. */
1316 * Loop through each packet. For each packet, verify that an
1317 * mbuf exists and that the last segment is free. If so, free
1321 tx_last = sw_ring[tx_id].last_id;
1323 if (sw_ring[tx_last].mbuf) {
1324 if (txr[tx_last].wb.status &
1325 E1000_TXD_STAT_DD) {
1327 * Increment the number of packets
1332 /* Get the start of the next packet. */
1333 tx_next = sw_ring[tx_last].next_id;
1336 * Loop through all segments in a
1340 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1341 sw_ring[tx_id].mbuf = NULL;
1342 sw_ring[tx_id].last_id = tx_id;
1344 /* Move to the next segment. */
1345 tx_id = sw_ring[tx_id].next_id;
1347 } while (tx_id != tx_next);
1349 if (unlikely(count == (int)free_cnt))
1353 * mbuf still in use, nothing left to
1359 * There are multiple reasons to be here:
1360 * 1) All the packets on the ring have been
1361 * freed - tx_id is equal to tx_first
1362 * and some packets have been freed.
1364 * 2) The interface has not sent a ring's worth of
1365 * packets yet, so the segment after tail is
1366 * still empty. Or a previous call to this
1367 * function freed some of the segments but
1368 * not all so there is a hole in the list.
1369 * Hopefully this is a rare case.
1370 * - Walk the list and find the next mbuf. If
1371 * there isn't one, then done.
1373 if (likely((tx_id == tx_first) && (count != 0)))
1377 * Walk the list and find the next mbuf, if any.
1380 /* Move to the next segment. */
1381 tx_id = sw_ring[tx_id].next_id;
1383 if (sw_ring[tx_id].mbuf)
1386 } while (tx_id != tx_first);
1389 * Determine why the previous loop bailed. If there
1390 * is no mbuf, we are done.
1392 if (sw_ring[tx_id].mbuf == NULL)
1403 eth_igb_tx_done_cleanup(void *txq, uint32_t free_cnt)
1405 return igb_tx_done_cleanup(txq, free_cnt);
1409 igb_reset_tx_queue_stat(struct igb_tx_queue *txq)
1414 memset((void*)&txq->ctx_cache, 0,
1415 IGB_CTX_NUM * sizeof(struct igb_advctx_info));
1419 igb_reset_tx_queue(struct igb_tx_queue *txq, struct rte_eth_dev *dev)
1421 static const union e1000_adv_tx_desc zeroed_desc = {{0}};
1422 struct igb_tx_entry *txe = txq->sw_ring;
1424 struct e1000_hw *hw;
1426 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1427 /* Zero out HW ring memory */
1428 for (i = 0; i < txq->nb_tx_desc; i++) {
1429 txq->tx_ring[i] = zeroed_desc;
1432 /* Initialize ring entries */
1433 prev = (uint16_t)(txq->nb_tx_desc - 1);
1434 for (i = 0; i < txq->nb_tx_desc; i++) {
1435 volatile union e1000_adv_tx_desc *txd = &(txq->tx_ring[i]);
1437 txd->wb.status = E1000_TXD_STAT_DD;
1440 txe[prev].next_id = i;
1444 txq->txd_type = E1000_ADVTXD_DTYP_DATA;
1445 /* 82575 specific, each tx queue will use 2 hw contexts */
1446 if (hw->mac.type == e1000_82575)
1447 txq->ctx_start = txq->queue_id * IGB_CTX_NUM;
1449 igb_reset_tx_queue_stat(txq);
1453 igb_get_tx_port_offloads_capa(struct rte_eth_dev *dev)
1455 uint64_t tx_offload_capa;
1458 tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT |
1459 DEV_TX_OFFLOAD_IPV4_CKSUM |
1460 DEV_TX_OFFLOAD_UDP_CKSUM |
1461 DEV_TX_OFFLOAD_TCP_CKSUM |
1462 DEV_TX_OFFLOAD_SCTP_CKSUM |
1463 DEV_TX_OFFLOAD_TCP_TSO |
1464 DEV_TX_OFFLOAD_MULTI_SEGS;
1466 return tx_offload_capa;
1470 igb_get_tx_queue_offloads_capa(struct rte_eth_dev *dev)
1472 uint64_t tx_queue_offload_capa;
1474 tx_queue_offload_capa = igb_get_tx_port_offloads_capa(dev);
1476 return tx_queue_offload_capa;
1480 eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
1483 unsigned int socket_id,
1484 const struct rte_eth_txconf *tx_conf)
1486 const struct rte_memzone *tz;
1487 struct igb_tx_queue *txq;
1488 struct e1000_hw *hw;
1492 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1494 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1497 * Validate number of transmit descriptors.
1498 * It must not exceed the hardware maximum, and must be a multiple
1501 if (nb_desc % IGB_TXD_ALIGN != 0 ||
1502 (nb_desc > E1000_MAX_RING_DESC) ||
1503 (nb_desc < E1000_MIN_RING_DESC)) {
1508 * The tx_free_thresh and tx_rs_thresh values are not used in the 1G
1511 if (tx_conf->tx_free_thresh != 0)
1512 PMD_INIT_LOG(INFO, "The tx_free_thresh parameter is not "
1513 "used for the 1G driver.");
1514 if (tx_conf->tx_rs_thresh != 0)
1515 PMD_INIT_LOG(INFO, "The tx_rs_thresh parameter is not "
1516 "used for the 1G driver.");
1517 if (tx_conf->tx_thresh.wthresh == 0 && hw->mac.type != e1000_82576)
1518 PMD_INIT_LOG(INFO, "To improve 1G driver performance, "
1519 "consider setting the TX WTHRESH value to 4, 8, "
1522 /* Free memory prior to re-allocation if needed */
1523 if (dev->data->tx_queues[queue_idx] != NULL) {
1524 igb_tx_queue_release(dev->data->tx_queues[queue_idx]);
1525 dev->data->tx_queues[queue_idx] = NULL;
1528 /* First allocate the tx queue data structure */
1529 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igb_tx_queue),
1530 RTE_CACHE_LINE_SIZE);
1535 * Allocate TX ring hardware descriptors. A memzone large enough to
1536 * handle the maximum ring size is allocated in order to allow for
1537 * resizing in later calls to the queue setup function.
1539 size = sizeof(union e1000_adv_tx_desc) * E1000_MAX_RING_DESC;
1540 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1541 E1000_ALIGN, socket_id);
1543 igb_tx_queue_release(txq);
1547 txq->nb_tx_desc = nb_desc;
1548 txq->pthresh = tx_conf->tx_thresh.pthresh;
1549 txq->hthresh = tx_conf->tx_thresh.hthresh;
1550 txq->wthresh = tx_conf->tx_thresh.wthresh;
1551 if (txq->wthresh > 0 && hw->mac.type == e1000_82576)
1553 txq->queue_id = queue_idx;
1554 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1555 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1556 txq->port_id = dev->data->port_id;
1558 txq->tdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_TDT(txq->reg_idx));
1559 txq->tx_ring_phys_addr = tz->iova;
1561 txq->tx_ring = (union e1000_adv_tx_desc *) tz->addr;
1562 /* Allocate software ring */
1563 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1564 sizeof(struct igb_tx_entry) * nb_desc,
1565 RTE_CACHE_LINE_SIZE);
1566 if (txq->sw_ring == NULL) {
1567 igb_tx_queue_release(txq);
1570 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1571 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1573 igb_reset_tx_queue(txq, dev);
1574 dev->tx_pkt_burst = eth_igb_xmit_pkts;
1575 dev->tx_pkt_prepare = &eth_igb_prep_pkts;
1576 dev->data->tx_queues[queue_idx] = txq;
1577 txq->offloads = offloads;
1583 igb_rx_queue_release_mbufs(struct igb_rx_queue *rxq)
1587 if (rxq->sw_ring != NULL) {
1588 for (i = 0; i < rxq->nb_rx_desc; i++) {
1589 if (rxq->sw_ring[i].mbuf != NULL) {
1590 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1591 rxq->sw_ring[i].mbuf = NULL;
1598 igb_rx_queue_release(struct igb_rx_queue *rxq)
1601 igb_rx_queue_release_mbufs(rxq);
1602 rte_free(rxq->sw_ring);
1608 eth_igb_rx_queue_release(void *rxq)
1610 igb_rx_queue_release(rxq);
1614 igb_reset_rx_queue(struct igb_rx_queue *rxq)
1616 static const union e1000_adv_rx_desc zeroed_desc = {{0}};
1619 /* Zero out HW ring memory */
1620 for (i = 0; i < rxq->nb_rx_desc; i++) {
1621 rxq->rx_ring[i] = zeroed_desc;
1625 rxq->pkt_first_seg = NULL;
1626 rxq->pkt_last_seg = NULL;
1630 igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
1632 uint64_t rx_offload_capa;
1635 rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
1636 DEV_RX_OFFLOAD_VLAN_FILTER |
1637 DEV_RX_OFFLOAD_IPV4_CKSUM |
1638 DEV_RX_OFFLOAD_UDP_CKSUM |
1639 DEV_RX_OFFLOAD_TCP_CKSUM |
1640 DEV_RX_OFFLOAD_JUMBO_FRAME |
1641 DEV_RX_OFFLOAD_KEEP_CRC |
1642 DEV_RX_OFFLOAD_SCATTER;
1644 return rx_offload_capa;
1648 igb_get_rx_queue_offloads_capa(struct rte_eth_dev *dev)
1650 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1651 uint64_t rx_queue_offload_capa;
1653 switch (hw->mac.type) {
1654 case e1000_vfadapt_i350:
1656 * As only one Rx queue can be used, let the per-queue offloading
1657 * capability be the same as the per-port offloading capability
1658 * for convenience.
1660 rx_queue_offload_capa = igb_get_rx_port_offloads_capa(dev);
1663 rx_queue_offload_capa = 0;
1665 return rx_queue_offload_capa;
1669 eth_igb_rx_queue_setup(struct rte_eth_dev *dev,
1672 unsigned int socket_id,
1673 const struct rte_eth_rxconf *rx_conf,
1674 struct rte_mempool *mp)
1676 const struct rte_memzone *rz;
1677 struct igb_rx_queue *rxq;
1678 struct e1000_hw *hw;
1682 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1684 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1687 * Validate number of receive descriptors.
1688 * It must not exceed the hardware maximum, and must be a multiple
1691 if (nb_desc % IGB_RXD_ALIGN != 0 ||
1692 (nb_desc > E1000_MAX_RING_DESC) ||
1693 (nb_desc < E1000_MIN_RING_DESC)) {
1697 /* Free memory prior to re-allocation if needed */
1698 if (dev->data->rx_queues[queue_idx] != NULL) {
1699 igb_rx_queue_release(dev->data->rx_queues[queue_idx]);
1700 dev->data->rx_queues[queue_idx] = NULL;
1703 /* First allocate the RX queue data structure. */
1704 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igb_rx_queue),
1705 RTE_CACHE_LINE_SIZE);
1708 rxq->offloads = offloads;
1710 rxq->nb_rx_desc = nb_desc;
1711 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1712 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1713 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1714 if (rxq->wthresh > 0 &&
1715 (hw->mac.type == e1000_82576 || hw->mac.type == e1000_vfadapt_i350))
1717 rxq->drop_en = rx_conf->rx_drop_en;
1718 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1719 rxq->queue_id = queue_idx;
1720 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1721 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1722 rxq->port_id = dev->data->port_id;
1723 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1724 rxq->crc_len = ETHER_CRC_LEN;
1729 * Allocate RX ring hardware descriptors. A memzone large enough to
1730 * handle the maximum ring size is allocated in order to allow for
1731 * resizing in later calls to the queue setup function.
1733 size = sizeof(union e1000_adv_rx_desc) * E1000_MAX_RING_DESC;
1734 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1735 E1000_ALIGN, socket_id);
1737 igb_rx_queue_release(rxq);
1740 rxq->rdt_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDT(rxq->reg_idx));
1741 rxq->rdh_reg_addr = E1000_PCI_REG_ADDR(hw, E1000_RDH(rxq->reg_idx));
1742 rxq->rx_ring_phys_addr = rz->iova;
1743 rxq->rx_ring = (union e1000_adv_rx_desc *) rz->addr;
1745 /* Allocate software ring. */
1746 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1747 sizeof(struct igb_rx_entry) * nb_desc,
1748 RTE_CACHE_LINE_SIZE);
1749 if (rxq->sw_ring == NULL) {
1750 igb_rx_queue_release(rxq);
1753 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%"PRIx64,
1754 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1756 dev->data->rx_queues[queue_idx] = rxq;
1757 igb_reset_rx_queue(rxq);
1763 eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1765 #define IGB_RXQ_SCAN_INTERVAL 4
1766 volatile union e1000_adv_rx_desc *rxdp;
1767 struct igb_rx_queue *rxq;
1770 rxq = dev->data->rx_queues[rx_queue_id];
1771 rxdp = &(rxq->rx_ring[rxq->rx_tail]);
1773 while ((desc < rxq->nb_rx_desc) &&
1774 (rxdp->wb.upper.status_error & E1000_RXD_STAT_DD)) {
1775 desc += IGB_RXQ_SCAN_INTERVAL;
1776 rxdp += IGB_RXQ_SCAN_INTERVAL;
1777 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
1778 rxdp = &(rxq->rx_ring[rxq->rx_tail +
1779 desc - rxq->nb_rx_desc]);
1786 eth_igb_rx_descriptor_done(void *rx_queue, uint16_t offset)
1788 volatile union e1000_adv_rx_desc *rxdp;
1789 struct igb_rx_queue *rxq = rx_queue;
1792 if (unlikely(offset >= rxq->nb_rx_desc))
1794 desc = rxq->rx_tail + offset;
1795 if (desc >= rxq->nb_rx_desc)
1796 desc -= rxq->nb_rx_desc;
1798 rxdp = &rxq->rx_ring[desc];
1799 return !!(rxdp->wb.upper.status_error & E1000_RXD_STAT_DD);
1803 eth_igb_rx_descriptor_status(void *rx_queue, uint16_t offset)
1805 struct igb_rx_queue *rxq = rx_queue;
1806 volatile uint32_t *status;
1809 if (unlikely(offset >= rxq->nb_rx_desc))
1812 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
1813 return RTE_ETH_RX_DESC_UNAVAIL;
1815 desc = rxq->rx_tail + offset;
1816 if (desc >= rxq->nb_rx_desc)
1817 desc -= rxq->nb_rx_desc;
1819 status = &rxq->rx_ring[desc].wb.upper.status_error;
1820 if (*status & rte_cpu_to_le_32(E1000_RXD_STAT_DD))
1821 return RTE_ETH_RX_DESC_DONE;
1823 return RTE_ETH_RX_DESC_AVAIL;
1827 eth_igb_tx_descriptor_status(void *tx_queue, uint16_t offset)
1829 struct igb_tx_queue *txq = tx_queue;
1830 volatile uint32_t *status;
1833 if (unlikely(offset >= txq->nb_tx_desc))
1836 desc = txq->tx_tail + offset;
1837 if (desc >= txq->nb_tx_desc)
1838 desc -= txq->nb_tx_desc;
1840 status = &txq->tx_ring[desc].wb.status;
1841 if (*status & rte_cpu_to_le_32(E1000_TXD_STAT_DD))
1842 return RTE_ETH_TX_DESC_DONE;
1844 return RTE_ETH_TX_DESC_FULL;
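/*
 * Usage sketch (application side, not part of this driver): these handlers
 * back rte_eth_rx_descriptor_status() and rte_eth_tx_descriptor_status(),
 * e.g.:
 *
 *	if (rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
 *			RTE_ETH_RX_DESC_DONE)
 *		// at least offset + 1 packets are ready to be retrieved
 */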
1848 igb_dev_clear_queues(struct rte_eth_dev *dev)
1851 struct igb_tx_queue *txq;
1852 struct igb_rx_queue *rxq;
1854 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1855 txq = dev->data->tx_queues[i];
1857 igb_tx_queue_release_mbufs(txq);
1858 igb_reset_tx_queue(txq, dev);
1862 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1863 rxq = dev->data->rx_queues[i];
1865 igb_rx_queue_release_mbufs(rxq);
1866 igb_reset_rx_queue(rxq);
1872 igb_dev_free_queues(struct rte_eth_dev *dev)
1876 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1877 eth_igb_rx_queue_release(dev->data->rx_queues[i]);
1878 dev->data->rx_queues[i] = NULL;
1880 dev->data->nb_rx_queues = 0;
1882 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1883 eth_igb_tx_queue_release(dev->data->tx_queues[i]);
1884 dev->data->tx_queues[i] = NULL;
1886 dev->data->nb_tx_queues = 0;
1890 * Receive Side Scaling (RSS).
1891 * See section 7.1.1.7 in the following document:
1892 * "Intel 82576 GbE Controller Datasheet" - Revision 2.45 October 2009
1895 * The source and destination IP addresses of the IP header and the source and
1896 * destination ports of TCP/UDP headers, if any, of received packets are hashed
1897 * against a configurable random key to compute a 32-bit RSS hash result.
1898 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
1899 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
1900 * RSS output index which is used as the RX queue index where to store the
1902 * The following output is supplied in the RX write-back descriptor:
1903 * - 32-bit result of the Microsoft RSS hash function,
1904 * - 4-bit RSS type field.
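 *
 * Worked example: a hash result of 0x12345678 has 0x78 = 120 as its seven
 * LSBs, so RETA entry 120 selects the destination queue; with the default
 * fill done in igb_rss_configure() below and 4 RX queues, that entry holds
 * 120 % 4 = 0, i.e. queue 0.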
1908 * RSS random key supplied in section 7.1.1.7.3 of the Intel 82576 datasheet.
1909 * Used as the default key.
1911 static uint8_t rss_intel_key[40] = {
1912 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
1913 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
1914 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
1915 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
1916 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
1920 igb_rss_disable(struct rte_eth_dev *dev)
1922 struct e1000_hw *hw;
1925 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1926 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1927 mrqc &= ~E1000_MRQC_ENABLE_MASK;
1928 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1932 igb_hw_rss_hash_set(struct e1000_hw *hw, struct rte_eth_rss_conf *rss_conf)
1940 hash_key = rss_conf->rss_key;
1941 if (hash_key != NULL) {
1942 /* Fill in RSS hash key */
1943 for (i = 0; i < 10; i++) {
1944 rss_key = hash_key[(i * 4)];
1945 rss_key |= hash_key[(i * 4) + 1] << 8;
1946 rss_key |= hash_key[(i * 4) + 2] << 16;
1947 rss_key |= hash_key[(i * 4) + 3] << 24;
1948 E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK(0), i, rss_key);
1952 /* Set configured hashing protocols in MRQC register */
1953 rss_hf = rss_conf->rss_hf;
1954 mrqc = E1000_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
1955 if (rss_hf & ETH_RSS_IPV4)
1956 mrqc |= E1000_MRQC_RSS_FIELD_IPV4;
1957 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
1958 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_TCP;
1959 if (rss_hf & ETH_RSS_IPV6)
1960 mrqc |= E1000_MRQC_RSS_FIELD_IPV6;
1961 if (rss_hf & ETH_RSS_IPV6_EX)
1962 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_EX;
1963 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
1964 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP;
1965 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
1966 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
1967 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
1968 mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
1969 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
1970 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
1971 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
1972 mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP_EX;
1973 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
1977 eth_igb_rss_hash_update(struct rte_eth_dev *dev,
1978 struct rte_eth_rss_conf *rss_conf)
1980 struct e1000_hw *hw;
1984 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1987 * Before changing anything, first check that the update RSS operation
1988 * does not attempt to disable RSS, if RSS was enabled at
1989 * initialization time, or does not attempt to enable RSS, if RSS was
1990 * disabled at initialization time.
1992 rss_hf = rss_conf->rss_hf & IGB_RSS_OFFLOAD_ALL;
1993 mrqc = E1000_READ_REG(hw, E1000_MRQC);
1994 if (!(mrqc & E1000_MRQC_ENABLE_MASK)) { /* RSS disabled */
1995 if (rss_hf != 0) /* Enable RSS */
1997 return 0; /* Nothing to do */
2000 if (rss_hf == 0) /* Disable RSS */
2002 igb_hw_rss_hash_set(hw, rss_conf);
2006 int eth_igb_rss_hash_conf_get(struct rte_eth_dev *dev,
2007 struct rte_eth_rss_conf *rss_conf)
2009 struct e1000_hw *hw;
2016 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2017 hash_key = rss_conf->rss_key;
2018 if (hash_key != NULL) {
2019 /* Return RSS hash key */
2020 for (i = 0; i < 10; i++) {
2021 rss_key = E1000_READ_REG_ARRAY(hw, E1000_RSSRK(0), i);
2022 hash_key[(i * 4)] = rss_key & 0x000000FF;
2023 hash_key[(i * 4) + 1] = (rss_key >> 8) & 0x000000FF;
2024 hash_key[(i * 4) + 2] = (rss_key >> 16) & 0x000000FF;
2025 hash_key[(i * 4) + 3] = (rss_key >> 24) & 0x000000FF;
2029 /* Get RSS functions configured in MRQC register */
2030 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2031 if ((mrqc & E1000_MRQC_ENABLE_RSS_4Q) == 0) { /* RSS is disabled */
2032 rss_conf->rss_hf = 0;
2036 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4)
2037 rss_hf |= ETH_RSS_IPV4;
2038 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP)
2039 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2040 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6)
2041 rss_hf |= ETH_RSS_IPV6;
2042 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_EX)
2043 rss_hf |= ETH_RSS_IPV6_EX;
2044 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP)
2045 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2046 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP_EX)
2047 rss_hf |= ETH_RSS_IPV6_TCP_EX;
2048 if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_UDP)
2049 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2050 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP)
2051 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2052 if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_UDP_EX)
2053 rss_hf |= ETH_RSS_IPV6_UDP_EX;
2054 rss_conf->rss_hf = rss_hf;
2059 igb_rss_configure(struct rte_eth_dev *dev)
2061 struct rte_eth_rss_conf rss_conf;
2062 struct e1000_hw *hw;
2066 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2068 /* Fill in redirection table. */
2069 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2070 for (i = 0; i < 128; i++) {
2077 q_idx = (uint8_t) ((dev->data->nb_rx_queues > 1) ?
2078 i % dev->data->nb_rx_queues : 0);
2079 reta.bytes[i & 3] = (uint8_t) (q_idx << shift);
2081 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
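/*
 * Worked example: the 128 single-byte RETA entries are packed four per
 * 32-bit register, so entries 0..3 go into E1000_RETA(0) and entries
 * 124..127 into E1000_RETA(31); with 4 RX queues, entry i simply holds
 * i % 4, shifted left by 6 on the 82575.
 */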
2085 * Configure the RSS key and the RSS protocols used to compute
2086 * the RSS hash of input packets.
2088 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
2089 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2090 igb_rss_disable(dev);
2093 if (rss_conf.rss_key == NULL)
2094 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2095 igb_hw_rss_hash_set(hw, &rss_conf);
2099 * Check whether the MAC type supports VMDq or not.
2100 * Return 1 if it does; otherwise, return 0.
2103 igb_is_vmdq_supported(const struct rte_eth_dev *dev)
2105 const struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2107 switch (hw->mac.type) {
2128 PMD_INIT_LOG(ERR, "Cannot support VMDq feature");
2134 igb_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
2136 struct rte_eth_vmdq_rx_conf *cfg;
2137 struct e1000_hw *hw;
2138 uint32_t mrqc, vt_ctl, vmolr, rctl;
2141 PMD_INIT_FUNC_TRACE();
2143 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2144 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
2146 /* Check whether the MAC type supports VMDq; a return value of 0 means it does not */
2147 if (igb_is_vmdq_supported(dev) == 0)
2150 igb_rss_disable(dev);
2152 /* RCTL: enable VLAN filter */
2153 rctl = E1000_READ_REG(hw, E1000_RCTL);
2154 rctl |= E1000_RCTL_VFE;
2155 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2157 /* MRQC: enable vmdq */
2158 mrqc = E1000_READ_REG(hw, E1000_MRQC);
2159 mrqc |= E1000_MRQC_ENABLE_VMDQ;
2160 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2162 /* VTCTL: pool selection according to VLAN tag */
2163 vt_ctl = E1000_READ_REG(hw, E1000_VT_CTL);
2164 if (cfg->enable_default_pool)
2165 vt_ctl |= (cfg->default_pool << E1000_VT_CTL_DEFAULT_POOL_SHIFT);
2166 vt_ctl |= E1000_VT_CTL_IGNORE_MAC;
2167 E1000_WRITE_REG(hw, E1000_VT_CTL, vt_ctl);
2169 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2170 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2171 vmolr &= ~(E1000_VMOLR_AUPE | E1000_VMOLR_ROMPE |
2172 E1000_VMOLR_ROPE | E1000_VMOLR_BAM |
2175 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_UNTAG)
2176 vmolr |= E1000_VMOLR_AUPE;
2177 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_MC)
2178 vmolr |= E1000_VMOLR_ROMPE;
2179 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_HASH_UC)
2180 vmolr |= E1000_VMOLR_ROPE;
2181 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_BROADCAST)
2182 vmolr |= E1000_VMOLR_BAM;
2183 if (cfg->rx_mode & ETH_VMDQ_ACCEPT_MULTICAST)
2184 vmolr |= E1000_VMOLR_MPME;
2186 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2190 * VMOLR: set STRVLAN to 1 when IGMAC in VTCTL is set to 1.
2191 * Both 82576 and 82580 support it.
2193 if (hw->mac.type != e1000_i350) {
2194 for (i = 0; i < E1000_VMOLR_SIZE; i++) {
2195 vmolr = E1000_READ_REG(hw, E1000_VMOLR(i));
2196 vmolr |= E1000_VMOLR_STRVLAN;
2197 E1000_WRITE_REG(hw, E1000_VMOLR(i), vmolr);
2201 /* VFTA - enable all vlan filters */
2202 for (i = 0; i < IGB_VFTA_SIZE; i++)
2203 E1000_WRITE_REG(hw, (E1000_VFTA+(i*4)), UINT32_MAX);
2205 /* VFRE: enable RX for all 8 pools; both 82576 and i350 support it */
2206 if (hw->mac.type != e1000_82580)
2207 E1000_WRITE_REG(hw, E1000_VFRE, E1000_MBVFICR_VFREQ_MASK);
2210 * RAH/RAL - allow pools to read specific MAC addresses.
2211 * In this case, all pools should be able to read from MAC address 0.
2213 E1000_WRITE_REG(hw, E1000_RAH(0), (E1000_RAH_AV | UINT16_MAX));
2214 E1000_WRITE_REG(hw, E1000_RAL(0), UINT32_MAX);
2216 /* VLVF: set up filters for vlan tags as configured */
2217 for (i = 0; i < cfg->nb_pool_maps; i++) {
2218 /* set the VLAN ID in the VLVF register and set the valid bit */
2219 E1000_WRITE_REG(hw, E1000_VLVF(i), (E1000_VLVF_VLANID_ENABLE |
2220 (cfg->pool_map[i].vlan_id & ETH_VLAN_ID_MAX) |
2221 ((cfg->pool_map[i].pools << E1000_VLVF_POOLSEL_SHIFT) &
2222 E1000_VLVF_POOLSEL_MASK)));
2225 E1000_WRITE_FLUSH(hw);
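/*
 * Configuration sketch (illustrative, all field values are placeholders):
 * an application selects this VMDq path before rte_eth_dev_configure()
 * with something like
 *
 *     struct rte_eth_conf conf = { .rxmode = { .mq_mode = ETH_MQ_RX_VMDQ_ONLY } };
 *     conf.rx_adv_conf.vmdq_rx_conf.nb_queue_pools = ETH_8_POOLS;
 *     conf.rx_adv_conf.vmdq_rx_conf.rx_mode = ETH_VMDQ_ACCEPT_UNTAG |
 *                                             ETH_VMDQ_ACCEPT_BROADCAST;
 *     conf.rx_adv_conf.vmdq_rx_conf.nb_pool_maps = 1;
 *     conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].vlan_id = 100;
 *     conf.rx_adv_conf.vmdq_rx_conf.pool_map[0].pools = 1 << 0;
 *
 * so that packets tagged with VLAN 100 land in pool 0 via the VLVF
 * programming above.
 */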
2231 /*********************************************************************
2233 * Enable receive unit.
2235 **********************************************************************/
2238 igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
2240 struct igb_rx_entry *rxe = rxq->sw_ring;
2244 /* Initialize software ring entries. */
2245 for (i = 0; i < rxq->nb_rx_desc; i++) {
2246 volatile union e1000_adv_rx_desc *rxd;
2247 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2250 PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
2251 "queue_id=%hu", rxq->queue_id);
2255 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2256 rxd = &rxq->rx_ring[i];
2257 rxd->read.hdr_addr = 0;
2258 rxd->read.pkt_addr = dma_addr;
2265 #define E1000_MRQC_DEF_Q_SHIFT (3)
2267 igb_dev_mq_rx_configure(struct rte_eth_dev *dev)
2269 struct e1000_hw *hw =
2270 E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2273 if (RTE_ETH_DEV_SRIOV(dev).active == ETH_8_POOLS) {
2275 * SRIOV active scheme.
2276 * FIXME: RSS together with VMDq & SRIOV is not supported yet.
2278 mrqc = E1000_MRQC_ENABLE_VMDQ;
2279 /* 011b: ignore Def_Q; use the default pool from VT_CTL.DEF_PL */
2280 mrqc |= 0x3 << E1000_MRQC_DEF_Q_SHIFT;
2281 E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
2282 } else if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
2284 * SRIOV inactive scheme
2286 switch (dev->data->dev_conf.rxmode.mq_mode) {
2288 igb_rss_configure(dev);
2290 case ETH_MQ_RX_VMDQ_ONLY:
2291 /* Configure general VMDq-only RX parameters */
2292 igb_vmdq_rx_hw_configure(dev);
2294 case ETH_MQ_RX_NONE:
2295 /* If mq_mode is none, disable RSS. */
2297 igb_rss_disable(dev);
2306 eth_igb_rx_init(struct rte_eth_dev *dev)
2308 struct rte_eth_rxmode *rxmode;
2309 struct e1000_hw *hw;
2310 struct igb_rx_queue *rxq;
2315 uint16_t rctl_bsize;
2319 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2323 * Make sure receives are disabled while setting
2324 * up the descriptor ring.
2326 rctl = E1000_READ_REG(hw, E1000_RCTL);
2327 E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
2329 rxmode = &dev->data->dev_conf.rxmode;
2332 * Configure jumbo frame support, if requested.
2334 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2335 rctl |= E1000_RCTL_LPE;
2338 * Set the maximum packet length by default; it may be updated later
2339 * when dual VLAN is enabled or disabled.
2341 E1000_WRITE_REG(hw, E1000_RLPML,
2342 dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE);
2345 rctl &= ~E1000_RCTL_LPE;
2347 /* Configure and enable each RX queue. */
2349 dev->rx_pkt_burst = eth_igb_recv_pkts;
2350 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2354 rxq = dev->data->rx_queues[i];
2358 * i350 and i354 vlan packets have vlan tags byte swapped.
2360 if (hw->mac.type == e1000_i350 || hw->mac.type == e1000_i354) {
2361 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2362 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2364 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2367 /* Allocate buffers for descriptor rings and set up queue */
2368 ret = igb_alloc_rx_queue_mbufs(rxq);
2373 * Reset crc_len in case it was changed after queue setup by a call to configure.
2376 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2377 rxq->crc_len = ETHER_CRC_LEN;
2381 bus_addr = rxq->rx_ring_phys_addr;
2382 E1000_WRITE_REG(hw, E1000_RDLEN(rxq->reg_idx),
2384 rxq->nb_rx_desc * sizeof(union e1000_adv_rx_desc));
2385 E1000_WRITE_REG(hw, E1000_RDBAH(rxq->reg_idx),
2386 (uint32_t)(bus_addr >> 32));
2387 E1000_WRITE_REG(hw, E1000_RDBAL(rxq->reg_idx), (uint32_t)bus_addr);
2389 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2392 * Configure RX buffer size.
2394 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2395 RTE_PKTMBUF_HEADROOM);
2396 if (buf_size >= 1024) {
2398 * Configure the BSIZEPACKET field of the SRRCTL
2399 * register of the queue.
2400 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2401 * If this field is equal to 0b, then RCTL.BSIZE
2402 * determines the RX packet buffer size.
2404 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2405 E1000_SRRCTL_BSIZEPKT_MASK);
2406 buf_size = (uint16_t) ((srrctl &
2407 E1000_SRRCTL_BSIZEPKT_MASK) <<
2408 E1000_SRRCTL_BSIZEPKT_SHIFT);
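/*
 * Worked example (illustrative): a mempool created with a 2048-byte data
 * room and the default 128-byte RTE_PKTMBUF_HEADROOM yields
 * buf_size = 1920; 1920 >> 10 gives BSIZEPACKET = 1, and buf_size is
 * rounded back down to 1 << 10 = 1024 bytes, the buffer size the
 * hardware will actually use.
 */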
2410 /* Account for dual VLAN (two tags) when comparing against the buffer size */
2411 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2412 2 * VLAN_TAG_SIZE) > buf_size){
2413 if (!dev->data->scattered_rx)
2415 "forcing scatter mode");
2416 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2417 dev->data->scattered_rx = 1;
2421 * Use BSIZE field of the device RCTL register.
2423 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2424 rctl_bsize = buf_size;
2425 if (!dev->data->scattered_rx)
2426 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2427 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2428 dev->data->scattered_rx = 1;
2431 /* Drop packets when no RX descriptors are available, if drop_en is set */
2433 srrctl |= E1000_SRRCTL_DROP_EN;
2435 E1000_WRITE_REG(hw, E1000_SRRCTL(rxq->reg_idx), srrctl);
2437 /* Enable this RX queue. */
2438 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(rxq->reg_idx));
2439 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2440 rxdctl &= 0xFFF00000;
2441 rxdctl |= (rxq->pthresh & 0x1F);
2442 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2443 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
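/*
 * Layout note (for reference): RXDCTL packs PTHRESH in bits 4:0, HTHRESH
 * in bits 12:8 and WTHRESH in bits 20:16, so e.g. pthresh = 8,
 * hthresh = 8, wthresh = 4 contributes 0x00040808 to the value written
 * below (the queue-enable bit set above lives in the preserved upper
 * bits). TXDCTL in eth_igb_tx_init() uses the same threshold layout.
 */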
2444 E1000_WRITE_REG(hw, E1000_RXDCTL(rxq->reg_idx), rxdctl);
2447 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2448 if (!dev->data->scattered_rx)
2449 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2450 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2451 dev->data->scattered_rx = 1;
2455 * Setup BSIZE field of RCTL register, if needed.
2456 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
2457 * register, since the code above configures the SRRCTL register of
2458 * the RX queue in such a case.
2459 * All configurable sizes are:
2460 * 16384: rctl |= (E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX);
2461 * 8192: rctl |= (E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX);
2462 * 4096: rctl |= (E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX);
2463 * 2048: rctl |= E1000_RCTL_SZ_2048;
2464 * 1024: rctl |= E1000_RCTL_SZ_1024;
2465 * 512: rctl |= E1000_RCTL_SZ_512;
2466 * 256: rctl |= E1000_RCTL_SZ_256;
2468 if (rctl_bsize > 0) {
2469 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
2470 rctl |= E1000_RCTL_SZ_512;
2471 else /* 256 <= buf_size < 512 - use 256 */
2472 rctl |= E1000_RCTL_SZ_256;
2476 * Configure RSS if the device was configured with multiple RX queues.
2478 igb_dev_mq_rx_configure(dev);
2480 /* Update the rctl since igb_dev_mq_rx_configure may change its value */
2481 rctl |= E1000_READ_REG(hw, E1000_RCTL);
2484 * Setup the Checksum Register.
2485 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
2487 rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
2488 rxcsum |= E1000_RXCSUM_PCSD;
2490 /* Enable both L3/L4 rx checksum offload */
2491 if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
2492 rxcsum |= E1000_RXCSUM_IPOFL;
2494 rxcsum &= ~E1000_RXCSUM_IPOFL;
2495 if (rxmode->offloads &
2496 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM))
2497 rxcsum |= E1000_RXCSUM_TUOFL;
2499 rxcsum &= ~E1000_RXCSUM_TUOFL;
2500 if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2501 rxcsum |= E1000_RXCSUM_CRCOFL;
2503 rxcsum &= ~E1000_RXCSUM_CRCOFL;
2505 E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
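/*
 * Configuration sketch (illustrative): the offload bits tested above come
 * from the application's port configuration, e.g.
 *
 *     struct rte_eth_conf conf = { 0 };
 *     conf.rxmode.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
 *                            DEV_RX_OFFLOAD_UDP_CKSUM |
 *                            DEV_RX_OFFLOAD_TCP_CKSUM;
 *
 * passed to rte_eth_dev_configure() before the port is started.
 * DEV_RX_OFFLOAD_CHECKSUM tested above is defined as the OR of these
 * three flags.
 */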
2507 /* Setup the Receive Control Register. */
2508 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC) {
2509 rctl &= ~E1000_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
2511 /* clear STRCRC bit in all queues */
2512 if (hw->mac.type == e1000_i350 ||
2513 hw->mac.type == e1000_i210 ||
2514 hw->mac.type == e1000_i211 ||
2515 hw->mac.type == e1000_i354) {
2516 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2517 rxq = dev->data->rx_queues[i];
2518 uint32_t dvmolr = E1000_READ_REG(hw,
2519 E1000_DVMOLR(rxq->reg_idx));
2520 dvmolr &= ~E1000_DVMOLR_STRCRC;
2521 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2525 rctl |= E1000_RCTL_SECRC; /* Strip Ethernet CRC. */
2527 /* set STRCRC bit in all queues */
2528 if (hw->mac.type == e1000_i350 ||
2529 hw->mac.type == e1000_i210 ||
2530 hw->mac.type == e1000_i211 ||
2531 hw->mac.type == e1000_i354) {
2532 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2533 rxq = dev->data->rx_queues[i];
2534 uint32_t dvmolr = E1000_READ_REG(hw,
2535 E1000_DVMOLR(rxq->reg_idx));
2536 dvmolr |= E1000_DVMOLR_STRCRC;
2537 E1000_WRITE_REG(hw, E1000_DVMOLR(rxq->reg_idx), dvmolr);
2542 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
2543 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
2544 E1000_RCTL_RDMTS_HALF |
2545 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
2547 /* Make sure VLAN Filters are off. */
2548 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_VMDQ_ONLY)
2549 rctl &= ~E1000_RCTL_VFE;
2550 /* Don't store bad packets. */
2551 rctl &= ~E1000_RCTL_SBP;
2553 /* Enable Receives. */
2554 E1000_WRITE_REG(hw, E1000_RCTL, rctl);
2557 * Setup the HW Rx Head and Tail Descriptor Pointers.
2558 * This needs to be done after enable.
2560 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2561 rxq = dev->data->rx_queues[i];
2562 E1000_WRITE_REG(hw, E1000_RDH(rxq->reg_idx), 0);
2563 E1000_WRITE_REG(hw, E1000_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
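/*
 * Note (for reference): writing 0 to RDH and nb_rx_desc - 1 to RDT hands
 * all but one descriptor to the hardware; the tail is kept one entry
 * short of wrapping onto the head because RDH == RDT means the ring has
 * no free descriptors for the hardware.
 */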
2569 /*********************************************************************
2571 * Enable transmit unit.
2573 **********************************************************************/
2575 eth_igb_tx_init(struct rte_eth_dev *dev)
2577 struct e1000_hw *hw;
2578 struct igb_tx_queue *txq;
2583 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2585 /* Setup the Base and Length of the Tx Descriptor Rings. */
2586 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2588 txq = dev->data->tx_queues[i];
2589 bus_addr = txq->tx_ring_phys_addr;
2591 E1000_WRITE_REG(hw, E1000_TDLEN(txq->reg_idx),
2593 txq->nb_tx_desc * sizeof(union e1000_adv_tx_desc));
2594 E1000_WRITE_REG(hw, E1000_TDBAH(txq->reg_idx),
2595 (uint32_t)(bus_addr >> 32));
2596 E1000_WRITE_REG(hw, E1000_TDBAL(txq->reg_idx), (uint32_t)bus_addr);
2598 /* Setup the HW Tx Head and Tail descriptor pointers. */
2599 E1000_WRITE_REG(hw, E1000_TDT(txq->reg_idx), 0);
2600 E1000_WRITE_REG(hw, E1000_TDH(txq->reg_idx), 0);
2602 /* Setup Transmit threshold registers. */
2603 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(txq->reg_idx));
2604 txdctl |= txq->pthresh & 0x1F;
2605 txdctl |= ((txq->hthresh & 0x1F) << 8);
2606 txdctl |= ((txq->wthresh & 0x1F) << 16);
2607 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2608 E1000_WRITE_REG(hw, E1000_TXDCTL(txq->reg_idx), txdctl);
2611 /* Program the Transmit Control Register. */
2612 tctl = E1000_READ_REG(hw, E1000_TCTL);
2613 tctl &= ~E1000_TCTL_CT;
2614 tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
2615 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));
2617 e1000_config_collision_dist(hw);
2619 /* This write will effectively turn on the transmit unit. */
2620 E1000_WRITE_REG(hw, E1000_TCTL, tctl);
2623 /*********************************************************************
2625 * Enable VF receive unit.
2627 **********************************************************************/
2629 eth_igbvf_rx_init(struct rte_eth_dev *dev)
2631 struct e1000_hw *hw;
2632 struct igb_rx_queue *rxq;
2635 uint16_t rctl_bsize;
2639 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2642 e1000_rlpml_set_vf(hw,
2643 (uint16_t)(dev->data->dev_conf.rxmode.max_rx_pkt_len + VLAN_TAG_SIZE));
2646 /* Configure and enable each RX queue. */
2648 dev->rx_pkt_burst = eth_igb_recv_pkts;
2649 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2653 rxq = dev->data->rx_queues[i];
2657 * i350VF LB vlan packets have vlan tags byte swapped.
2659 if (hw->mac.type == e1000_vfadapt_i350) {
2660 rxq->flags |= IGB_RXQ_FLAG_LB_BSWAP_VLAN;
2661 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap required");
2663 PMD_INIT_LOG(DEBUG, "IGB rx vlan bswap not required");
2666 /* Allocate buffers for descriptor rings and set up queue */
2667 ret = igb_alloc_rx_queue_mbufs(rxq);
2671 bus_addr = rxq->rx_ring_phys_addr;
2672 E1000_WRITE_REG(hw, E1000_RDLEN(i),
2674 rxq->nb_rx_desc * sizeof(union e1000_adv_rx_desc));
2675 E1000_WRITE_REG(hw, E1000_RDBAH(i),
2676 (uint32_t)(bus_addr >> 32));
2677 E1000_WRITE_REG(hw, E1000_RDBAL(i), (uint32_t)bus_addr);
2679 srrctl = E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2682 * Configure RX buffer size.
2684 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2685 RTE_PKTMBUF_HEADROOM);
2686 if (buf_size >= 1024) {
2688 * Configure the BSIZEPACKET field of the SRRCTL
2689 * register of the queue.
2690 * Value is in 1 KB resolution, from 1 KB to 127 KB.
2691 * If this field is equal to 0b, then RCTL.BSIZE
2692 * determines the RX packet buffer size.
2694 srrctl |= ((buf_size >> E1000_SRRCTL_BSIZEPKT_SHIFT) &
2695 E1000_SRRCTL_BSIZEPKT_MASK);
2696 buf_size = (uint16_t) ((srrctl &
2697 E1000_SRRCTL_BSIZEPKT_MASK) <<
2698 E1000_SRRCTL_BSIZEPKT_SHIFT);
2700 /* Account for dual VLAN (two tags) when comparing against the buffer size */
2701 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
2702 2 * VLAN_TAG_SIZE) > buf_size){
2703 if (!dev->data->scattered_rx)
2705 "forcing scatter mode");
2706 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2707 dev->data->scattered_rx = 1;
2711 * Use BSIZE field of the device RCTL register.
2713 if ((rctl_bsize == 0) || (rctl_bsize > buf_size))
2714 rctl_bsize = buf_size;
2715 if (!dev->data->scattered_rx)
2716 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2717 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2718 dev->data->scattered_rx = 1;
2721 /* Drop packets when no RX descriptors are available, if drop_en is set */
2723 srrctl |= E1000_SRRCTL_DROP_EN;
2725 E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
2727 /* Enable this RX queue. */
2728 rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
2729 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2730 rxdctl &= 0xFFF00000;
2731 rxdctl |= (rxq->pthresh & 0x1F);
2732 rxdctl |= ((rxq->hthresh & 0x1F) << 8);
2733 if (hw->mac.type == e1000_vfadapt) {
2735 * Workaround for an 82576 VF erratum:
2736 * force WTHRESH to 1 to avoid
2737 * write-back sometimes not being triggered.
2740 PMD_INIT_LOG(DEBUG, "Force set RX WTHRESH to 1 !");
2743 rxdctl |= ((rxq->wthresh & 0x1F) << 16);
2744 E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
2747 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
2748 if (!dev->data->scattered_rx)
2749 PMD_INIT_LOG(DEBUG, "forcing scatter mode");
2750 dev->rx_pkt_burst = eth_igb_recv_scattered_pkts;
2751 dev->data->scattered_rx = 1;
2755 * Setup the HW Rx Head and Tail Descriptor Pointers.
2756 * This needs to be done after enable.
2758 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2759 rxq = dev->data->rx_queues[i];
2760 E1000_WRITE_REG(hw, E1000_RDH(i), 0);
2761 E1000_WRITE_REG(hw, E1000_RDT(i), rxq->nb_rx_desc - 1);
2767 /*********************************************************************
2769 * Enable VF transmit unit.
2771 **********************************************************************/
2773 eth_igbvf_tx_init(struct rte_eth_dev *dev)
2775 struct e1000_hw *hw;
2776 struct igb_tx_queue *txq;
2780 hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2782 /* Setup the Base and Length of the Tx Descriptor Rings. */
2783 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2786 txq = dev->data->tx_queues[i];
2787 bus_addr = txq->tx_ring_phys_addr;
2788 E1000_WRITE_REG(hw, E1000_TDLEN(i),
2790 txq->nb_tx_desc * sizeof(union e1000_adv_tx_desc));
2791 E1000_WRITE_REG(hw, E1000_TDBAH(i),
2792 (uint32_t)(bus_addr >> 32));
2793 E1000_WRITE_REG(hw, E1000_TDBAL(i), (uint32_t)bus_addr);
2795 /* Setup the HW Tx Head and Tail descriptor pointers. */
2796 E1000_WRITE_REG(hw, E1000_TDT(i), 0);
2797 E1000_WRITE_REG(hw, E1000_TDH(i), 0);
2799 /* Setup Transmit threshold registers. */
2800 txdctl = E1000_READ_REG(hw, E1000_TXDCTL(i));
2801 txdctl |= txq->pthresh & 0x1F;
2802 txdctl |= ((txq->hthresh & 0x1F) << 8);
2803 if (hw->mac.type == e1000_82576) {
2805 * Workaround for an 82576 VF erratum:
2806 * force WTHRESH to 1 to avoid
2807 * write-back sometimes not being triggered.
2810 PMD_INIT_LOG(DEBUG, "Force set TX WTHRESH to 1 !");
2813 txdctl |= ((txq->wthresh & 0x1F) << 16);
2814 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2815 E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
2821 igb_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2822 struct rte_eth_rxq_info *qinfo)
2824 struct igb_rx_queue *rxq;
2826 rxq = dev->data->rx_queues[queue_id];
2828 qinfo->mp = rxq->mb_pool;
2829 qinfo->scattered_rx = dev->data->scattered_rx;
2830 qinfo->nb_desc = rxq->nb_rx_desc;
2832 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2833 qinfo->conf.rx_drop_en = rxq->drop_en;
2834 qinfo->conf.offloads = rxq->offloads;
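/*
 * Usage sketch (illustrative): applications retrieve this information
 * through the ethdev API, e.g.
 *
 *     struct rte_eth_rxq_info qinfo;
 *     rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo);
 *
 * and the TX counterpart below is reached via rte_eth_tx_queue_info_get().
 * port_id and queue_id are placeholders.
 */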
2838 igb_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2839 struct rte_eth_txq_info *qinfo)
2841 struct igb_tx_queue *txq;
2843 txq = dev->data->tx_queues[queue_id];
2845 qinfo->nb_desc = txq->nb_tx_desc;
2847 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2848 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2849 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2850 qinfo->conf.offloads = txq->offloads;
2854 igb_rss_conf_init(struct igb_rte_flow_rss_conf *out,
2855 const struct rte_flow_action_rss *in)
2857 if (in->key_len > RTE_DIM(out->key) ||
2858 in->queue_num > RTE_DIM(out->queue))
2860 out->conf = (struct rte_flow_action_rss){
2864 .key_len = in->key_len,
2865 .queue_num = in->queue_num,
2866 .key = memcpy(out->key, in->key, in->key_len),
2867 .queue = memcpy(out->queue, in->queue,
2868 sizeof(*in->queue) * in->queue_num),
2874 igb_action_rss_same(const struct rte_flow_action_rss *comp,
2875 const struct rte_flow_action_rss *with)
2877 return (comp->func == with->func &&
2878 comp->level == with->level &&
2879 comp->types == with->types &&
2880 comp->key_len == with->key_len &&
2881 comp->queue_num == with->queue_num &&
2882 !memcmp(comp->key, with->key, with->key_len) &&
2883 !memcmp(comp->queue, with->queue,
2884 sizeof(*with->queue) * with->queue_num));
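/*
 * Usage sketch (illustrative): igb_config_rss_filter() below is reached
 * when a flow rule with an RSS action is created through rte_flow, e.g.
 *
 *     uint16_t queues[2] = { 0, 1 };
 *     struct rte_flow_action_rss rss = {
 *         .types = ETH_RSS_NONFRAG_IPV4_TCP,
 *         .key_len = 0,            // 0 means keep the default hash key
 *         .queue_num = 2,
 *         .queue = queues,
 *     };
 *
 * The accepted configuration is stored in filter_info->rss_info via
 * igb_rss_conf_init() so later requests can be compared against it with
 * igb_action_rss_same(). All values above are placeholders.
 */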
2888 igb_config_rss_filter(struct rte_eth_dev *dev,
2889 struct igb_rte_flow_rss_conf *conf, bool add)
2893 struct rte_eth_rss_conf rss_conf = {
2894 .rss_key = conf->conf.key_len ?
2895 (void *)(uintptr_t)conf->conf.key : NULL,
2896 .rss_key_len = conf->conf.key_len,
2897 .rss_hf = conf->conf.types,
2899 struct e1000_filter_info *filter_info =
2900 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2901 struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2906 if (igb_action_rss_same(&filter_info->rss_info.conf,
2908 igb_rss_disable(dev);
2909 memset(&filter_info->rss_info, 0,
2910 sizeof(struct igb_rte_flow_rss_conf));
2916 if (filter_info->rss_info.conf.queue_num)
2919 /* Fill in redirection table. */
2920 shift = (hw->mac.type == e1000_82575) ? 6 : 0;
2921 for (i = 0, j = 0; i < 128; i++, j++) {
2928 if (j == conf->conf.queue_num)
2930 q_idx = conf->conf.queue[j];
2931 reta.bytes[i & 3] = (uint8_t)(q_idx << shift);
2933 E1000_WRITE_REG(hw, E1000_RETA(i >> 2), reta.dword);
2936 /* Configure the RSS key and the RSS protocols used to compute
2937 * the RSS hash of input packets.
2939 if ((rss_conf.rss_hf & IGB_RSS_OFFLOAD_ALL) == 0) {
2940 igb_rss_disable(dev);
2943 if (rss_conf.rss_key == NULL)
2944 rss_conf.rss_key = rss_intel_key; /* Default hash key */
2945 igb_hw_rss_hash_set(hw, &rss_conf);
2947 if (igb_rss_conf_init(&filter_info->rss_info, &conf->conf))