1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
5 #include <rte_config.h>
7 #include <rte_malloc.h>
8 #include <ethdev_driver.h>
14 #ifdef RTE_PMD_USE_PREFETCH
15 #define rte_igc_prefetch(p) rte_prefetch0(p)
17 #define rte_igc_prefetch(p) do {} while (0)
20 #ifdef RTE_PMD_PACKET_PREFETCH
21 #define rte_packet_prefetch(p) rte_prefetch1(p)
23 #define rte_packet_prefetch(p) do {} while (0)
26 /* Multicast / Unicast table offset mask. */
27 #define IGC_RCTL_MO_MSK (3u << IGC_RCTL_MO_SHIFT)
30 #define IGC_RCTL_LBM_SHIFT 6
31 #define IGC_RCTL_LBM_MSK (3u << IGC_RCTL_LBM_SHIFT)
33 /* Hash select for MTA */
34 #define IGC_RCTL_HSEL_SHIFT 8
35 #define IGC_RCTL_HSEL_MSK (3u << IGC_RCTL_HSEL_SHIFT)
36 #define IGC_RCTL_PSP (1u << 21)
38 /* Receive buffer size for header buffer */
39 #define IGC_SRRCTL_BSIZEHEADER_SHIFT 8
41 /* RX descriptor status and error flags */
42 #define IGC_RXD_STAT_L4CS (1u << 5)
43 #define IGC_RXD_STAT_VEXT (1u << 9)
44 #define IGC_RXD_STAT_LLINT (1u << 11)
45 #define IGC_RXD_STAT_SCRC (1u << 12)
46 #define IGC_RXD_STAT_SMDT_MASK (3u << 13)
47 #define IGC_RXD_STAT_MC (1u << 19)
48 #define IGC_RXD_EXT_ERR_L4E (1u << 29)
49 #define IGC_RXD_EXT_ERR_IPE (1u << 30)
50 #define IGC_RXD_EXT_ERR_RXE (1u << 31)
51 #define IGC_RXD_RSS_TYPE_MASK 0xfu
52 #define IGC_RXD_PCTYPE_MASK (0x7fu << 4)
53 #define IGC_RXD_ETQF_SHIFT 12
54 #define IGC_RXD_ETQF_MSK (0xfu << IGC_RXD_ETQF_SHIFT)
55 #define IGC_RXD_VPKT (1u << 16)
57 /* TXD control bits */
58 #define IGC_TXDCTL_PTHRESH_SHIFT 0
59 #define IGC_TXDCTL_HTHRESH_SHIFT 8
60 #define IGC_TXDCTL_WTHRESH_SHIFT 16
61 #define IGC_TXDCTL_PTHRESH_MSK (0x1fu << IGC_TXDCTL_PTHRESH_SHIFT)
62 #define IGC_TXDCTL_HTHRESH_MSK (0x1fu << IGC_TXDCTL_HTHRESH_SHIFT)
63 #define IGC_TXDCTL_WTHRESH_MSK (0x1fu << IGC_TXDCTL_WTHRESH_SHIFT)
65 /* RXD control bits */
66 #define IGC_RXDCTL_PTHRESH_SHIFT 0
67 #define IGC_RXDCTL_HTHRESH_SHIFT 8
68 #define IGC_RXDCTL_WTHRESH_SHIFT 16
69 #define IGC_RXDCTL_PTHRESH_MSK (0x1fu << IGC_RXDCTL_PTHRESH_SHIFT)
70 #define IGC_RXDCTL_HTHRESH_MSK (0x1fu << IGC_RXDCTL_HTHRESH_SHIFT)
71 #define IGC_RXDCTL_WTHRESH_MSK (0x1fu << IGC_RXDCTL_WTHRESH_SHIFT)
73 #define IGC_TSO_MAX_HDRLEN 512
74 #define IGC_TSO_MAX_MSS 9216
76 /* Bit mask indicating which bits are required to build the TX context */
77 #define IGC_TX_OFFLOAD_MASK ( \
87 #define IGC_TX_OFFLOAD_SEG (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)
89 #define IGC_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
90 #define IGC_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
92 /* Reserved L4 packet type */
93 #define IGC_ADVTXD_TUCMD_L4T_RSV 0x00001800
95 #define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
98 * Structure associated with each descriptor of the RX ring of an RX queue.
100 struct igc_rx_entry {
101 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
105 * Structure associated with each RX queue.
107 struct igc_rx_queue {
108 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
109 volatile union igc_adv_rx_desc *rx_ring;
110 /**< RX ring virtual address. */
111 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
112 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
113 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
114 struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
115 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
116 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
117 uint16_t nb_rx_desc; /**< number of RX descriptors. */
118 uint16_t rx_tail; /**< current value of RDT register. */
119 uint16_t nb_rx_hold; /**< number of held free RX desc. */
120 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
121 uint16_t queue_id; /**< RX queue index. */
122 uint16_t reg_idx; /**< RX queue register index. */
123 uint16_t port_id; /**< Device port identifier. */
124 uint8_t pthresh; /**< Prefetch threshold register. */
125 uint8_t hthresh; /**< Host threshold register. */
126 uint8_t wthresh; /**< Write-back threshold register. */
127 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
128 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
129 uint32_t flags; /**< RX flags. */
130 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
133 /** Offload features */
134 union igc_tx_offload {
137 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
138 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
139 uint64_t vlan_tci:16;
140 /**< VLAN Tag Control Identifier (CPU order). */
141 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
142 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
143 /* uint64_t unused:8; */
148 * Compare mask for igc_tx_offload.data;
149 * it must be kept in sync with the igc_tx_offload layout.
151 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
152 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
153 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
154 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
155 /** Mac + IP + TCP + Mss mask. */
156 #define TX_TSO_CMP_MASK \
157 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
160 * Structure used to check whether a new context descriptor needs to be built
162 struct igc_advctx_info {
163 uint64_t flags; /**< ol_flags related to context build. */
164 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
165 union igc_tx_offload tx_offload;
166 /** compare mask for tx offload. */
167 union igc_tx_offload tx_offload_mask;
171 * Hardware context number
174 IGC_CTX_0 = 0, /**< CTX0 */
175 IGC_CTX_1 = 1, /**< CTX1 */
176 IGC_CTX_NUM = 2, /**< CTX_NUM */
180 * Structure associated with each descriptor of the TX ring of a TX queue.
182 struct igc_tx_entry {
183 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
184 uint16_t next_id; /**< Index of next descriptor in ring. */
185 uint16_t last_id; /**< Index of last scattered descriptor. */
189 * Structure associated with each TX queue.
191 struct igc_tx_queue {
192 volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
193 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
194 struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
195 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
196 uint32_t txd_type; /**< Device-specific TXD type */
197 uint16_t nb_tx_desc; /**< number of TX descriptors. */
198 uint16_t tx_tail; /**< Current value of TDT register. */
200 /**< Index of first used TX descriptor. */
201 uint16_t queue_id; /**< TX queue index. */
202 uint16_t reg_idx; /**< TX queue register index. */
203 uint16_t port_id; /**< Device port identifier. */
204 uint8_t pthresh; /**< Prefetch threshold register. */
205 uint8_t hthresh; /**< Host threshold register. */
206 uint8_t wthresh; /**< Write-back threshold register. */
209 /**< Start context position for transmit queue. */
210 struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
211 /**< Hardware context history.*/
212 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
215 static inline uint64_t
216 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
218 static uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,
219 PKT_RX_L4_CKSUM_BAD};
221 static uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,
222 PKT_RX_IP_CKSUM_BAD};
223 uint64_t pkt_flags = 0;
226 if (statuserr & IGC_RXD_STAT_VP)
227 pkt_flags |= PKT_RX_VLAN_STRIPPED;
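/*
 * Build a 2-bit index into the lookup tables above: bit 1 is set when the
 * hardware reports that it computed the checksum (L4CS/UDPCS for L4, IPCS
 * for L3) and bit 0 is set when the matching error bit is raised.
 * Index 0 or 1 selects "not checked", 2 selects GOOD and 3 selects BAD.
 */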
229 tmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));
230 tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);
231 pkt_flags |= l4_chksum_flags[tmp];
233 tmp = !!(statuserr & IGC_RXD_STAT_IPCS);
234 tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_IPE);
235 pkt_flags |= l3_chksum_flags[tmp];
240 #define IGC_PACKET_TYPE_IPV4 0X01
241 #define IGC_PACKET_TYPE_IPV4_TCP 0X11
242 #define IGC_PACKET_TYPE_IPV4_UDP 0X21
243 #define IGC_PACKET_TYPE_IPV4_SCTP 0X41
244 #define IGC_PACKET_TYPE_IPV4_EXT 0X03
245 #define IGC_PACKET_TYPE_IPV4_EXT_SCTP 0X43
246 #define IGC_PACKET_TYPE_IPV6 0X04
247 #define IGC_PACKET_TYPE_IPV6_TCP 0X14
248 #define IGC_PACKET_TYPE_IPV6_UDP 0X24
249 #define IGC_PACKET_TYPE_IPV6_EXT 0X0C
250 #define IGC_PACKET_TYPE_IPV6_EXT_TCP 0X1C
251 #define IGC_PACKET_TYPE_IPV6_EXT_UDP 0X2C
252 #define IGC_PACKET_TYPE_IPV4_IPV6 0X05
253 #define IGC_PACKET_TYPE_IPV4_IPV6_TCP 0X15
254 #define IGC_PACKET_TYPE_IPV4_IPV6_UDP 0X25
255 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
256 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
257 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
258 #define IGC_PACKET_TYPE_MAX 0X80
259 #define IGC_PACKET_TYPE_MASK 0X7F
260 #define IGC_PACKET_TYPE_SHIFT 0X04
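/*
 * The values above encode the packet-type field reported in the RX
 * descriptor's pkt_info word; rx_desc_pkt_info_to_pkt_type() below extracts
 * that field with IGC_PACKET_TYPE_SHIFT/IGC_PACKET_TYPE_MASK and translates
 * it through a lookup table into RTE_PTYPE_* values. Encodings without a
 * table entry resolve to 0, i.e. RTE_PTYPE_UNKNOWN.
 */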
262 static inline uint32_t
263 rx_desc_pkt_info_to_pkt_type(uint32_t pkt_info)
265 static const uint32_t
266 ptype_table[IGC_PACKET_TYPE_MAX] __rte_cache_aligned = {
267 [IGC_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
269 [IGC_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
270 RTE_PTYPE_L3_IPV4_EXT,
271 [IGC_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
273 [IGC_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
274 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
275 RTE_PTYPE_INNER_L3_IPV6,
276 [IGC_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
277 RTE_PTYPE_L3_IPV6_EXT,
278 [IGC_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
279 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
280 RTE_PTYPE_INNER_L3_IPV6_EXT,
281 [IGC_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
282 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
283 [IGC_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
284 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
285 [IGC_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
286 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
287 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
288 [IGC_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
289 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
290 [IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
291 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
292 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
293 [IGC_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
294 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
295 [IGC_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
296 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
297 [IGC_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
298 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
299 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
300 [IGC_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
301 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
302 [IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
303 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
304 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
305 [IGC_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
306 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
307 [IGC_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
308 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
310 if (unlikely(pkt_info & IGC_RXDADV_PKTTYPE_ETQF))
311 return RTE_PTYPE_UNKNOWN;
313 pkt_info = (pkt_info >> IGC_PACKET_TYPE_SHIFT) & IGC_PACKET_TYPE_MASK;
315 return ptype_table[pkt_info];
319 rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
320 union igc_adv_rx_desc *rxd, uint32_t staterr)
323 uint32_t hlen_type_rss;
326 /* Prefetch data of first segment, if configured to do so. */
327 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
329 rxm->port = rxq->port_id;
330 hlen_type_rss = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data);
331 rxm->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss);
332 rxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);
334 pkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ? PKT_RX_RSS_HASH : 0;
337 if (hlen_type_rss & IGC_RXD_VPKT)
338 pkt_flags |= PKT_RX_VLAN;
340 pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
342 rxm->ol_flags = pkt_flags;
343 pkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
344 rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);
348 igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
350 struct igc_rx_queue * const rxq = rx_queue;
351 volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;
352 struct igc_rx_entry * const sw_ring = rxq->sw_ring;
353 uint16_t rx_id = rxq->rx_tail;
355 uint16_t nb_hold = 0;
357 while (nb_rx < nb_pkts) {
358 volatile union igc_adv_rx_desc *rxdp;
359 struct igc_rx_entry *rxe;
360 struct rte_mbuf *rxm;
361 struct rte_mbuf *nmb;
362 union igc_adv_rx_desc rxd;
367 * The order of operations here is important as the DD status
368 * bit must not be read after any other descriptor fields.
369 * rx_ring and rxdp are pointing to volatile data so the order
370 * of accesses cannot be reordered by the compiler. If they were
371 * not volatile, they could be reordered which could lead to
372 * using invalid descriptor fields when read from rxd.
374 rxdp = &rx_ring[rx_id];
375 staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);
376 if (!(staterr & IGC_RXD_STAT_DD))
383 * If the IGC_RXD_STAT_EOP flag is not set, the RX packet is
384 * likely to be invalid and to be dropped by the various
385 * validation checks performed by the network stack.
387 * Allocate a new mbuf to replenish the RX ring descriptor.
388 * If the allocation fails:
389 * - arrange for that RX descriptor to be the first one
390 * being parsed the next time the receive function is
391 * invoked [on the same queue].
393 * - Stop parsing the RX ring and return immediately.
395 * This policy does not drop the packet received in the RX
396 * descriptor for which the allocation of a new mbuf failed.
397 * Thus, it allows that packet to be later retrieved if
398 * mbufs have been freed in the meantime.
399 * As a side effect, holding RX descriptors instead of
400 * systematically giving them back to the NIC may lead to
401 * RX ring exhaustion situations.
402 * However, the NIC can gracefully prevent such situations
403 * from happening by sending specific "back-pressure" flow control
404 * frames to its peer(s).
407 "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u",
408 rxq->port_id, rxq->queue_id, rx_id, staterr,
409 rte_le_to_cpu_16(rxd.wb.upper.length));
411 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
415 "RX mbuf alloc failed, port_id=%u queue_id=%u",
416 rxq->port_id, rxq->queue_id);
418 rte_eth_devices[id].data->rx_mbuf_alloc_failed++;
423 rxe = &sw_ring[rx_id];
425 if (rx_id >= rxq->nb_rx_desc)
428 /* Prefetch next mbuf while processing current one. */
429 rte_igc_prefetch(sw_ring[rx_id].mbuf);
432 * When the next RX descriptor is on a cache-line boundary,
433 * prefetch the next 4 RX descriptors and the next 8 pointers to mbufs.
436 if ((rx_id & 0x3) == 0) {
437 rte_igc_prefetch(&rx_ring[rx_id]);
438 rte_igc_prefetch(&sw_ring[rx_id]);
442 * Update RX descriptor with the physical address of the new
443 * data buffer of the new allocated mbuf.
447 rxdp->read.hdr_addr = 0;
448 rxdp->read.pkt_addr =
449 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
452 rxm->data_off = RTE_PKTMBUF_HEADROOM;
453 data_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len;
454 rxm->data_len = data_len;
455 rxm->pkt_len = data_len;
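/*
 * Single-buffer receive path: each descriptor carries a complete packet,
 * so data_len and pkt_len are identical and the CRC length (if kept by the
 * hardware) has already been subtracted above.
 */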
458 rx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);
461 * Store the mbuf address into the next entry of the array
462 * of returned packets.
464 rx_pkts[nb_rx++] = rxm;
466 rxq->rx_tail = rx_id;
469 * If the number of free RX descriptors is greater than the RX free
470 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
472 * Update the RDT with the value of the last processed RX descriptor
473 * minus 1, to guarantee that the RDT register is never equal to the
474 * RDH register, which creates a "full" ring situation from the
475 * hardware point of view...
477 nb_hold = nb_hold + rxq->nb_rx_hold;
478 if (nb_hold > rxq->rx_free_thresh) {
480 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
481 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
482 rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);
483 IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
486 rxq->nb_rx_hold = nb_hold;
491 igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
494 struct igc_rx_queue * const rxq = rx_queue;
495 volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;
496 struct igc_rx_entry * const sw_ring = rxq->sw_ring;
497 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
498 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
500 uint16_t rx_id = rxq->rx_tail;
502 uint16_t nb_hold = 0;
504 while (nb_rx < nb_pkts) {
505 volatile union igc_adv_rx_desc *rxdp;
506 struct igc_rx_entry *rxe;
507 struct rte_mbuf *rxm;
508 struct rte_mbuf *nmb;
509 union igc_adv_rx_desc rxd;
515 * The order of operations here is important as the DD status
516 * bit must not be read after any other descriptor fields.
517 * rx_ring and rxdp are pointing to volatile data so the order
518 * of accesses cannot be reordered by the compiler. If they were
519 * not volatile, they could be reordered which could lead to
520 * using invalid descriptor fields when read from rxd.
522 rxdp = &rx_ring[rx_id];
523 staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);
524 if (!(staterr & IGC_RXD_STAT_DD))
531 * Allocate a new mbuf to replenish the RX ring descriptor.
532 * If the allocation fails:
533 * - arrange for that RX descriptor to be the first one
534 * being parsed the next time the receive function is
535 * invoked [on the same queue].
537 * - Stop parsing the RX ring and return immediately.
539 * This policy does not drop the packet received in the RX
540 * descriptor for which the allocation of a new mbuf failed.
541 * Thus, it allows that packet to be later retrieved if
542 * mbufs have been freed in the meantime.
543 * As a side effect, holding RX descriptors instead of
544 * systematically giving them back to the NIC may lead to
545 * RX ring exhaustion situations.
546 * However, the NIC can gracefully prevent such situations
547 * from happening by sending specific "back-pressure" flow control
548 * frames to its peer(s).
551 "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u",
552 rxq->port_id, rxq->queue_id, rx_id, staterr,
553 rte_le_to_cpu_16(rxd.wb.upper.length));
555 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
559 "RX mbuf alloc failed, port_id=%u queue_id=%u",
560 rxq->port_id, rxq->queue_id);
562 rte_eth_devices[id].data->rx_mbuf_alloc_failed++;
567 rxe = &sw_ring[rx_id];
569 if (rx_id >= rxq->nb_rx_desc)
572 /* Prefetch next mbuf while processing current one. */
573 rte_igc_prefetch(sw_ring[rx_id].mbuf);
576 * When the next RX descriptor is on a cache-line boundary,
577 * prefetch the next 4 RX descriptors and the next 8 pointers to mbufs.
580 if ((rx_id & 0x3) == 0) {
581 rte_igc_prefetch(&rx_ring[rx_id]);
582 rte_igc_prefetch(&sw_ring[rx_id]);
586 * Update RX descriptor with the physical address of the new
587 * data buffer of the new allocated mbuf.
591 rxdp->read.hdr_addr = 0;
592 rxdp->read.pkt_addr =
593 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
597 * Set data length & data buffer address of mbuf.
599 rxm->data_off = RTE_PKTMBUF_HEADROOM;
600 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
601 rxm->data_len = data_len;
604 * If this is the first buffer of the received packet,
605 * set the pointer to the first mbuf of the packet and
606 * initialize its context.
607 * Otherwise, update the total length and the number of segments
608 * of the current scattered packet, and update the pointer to
609 * the last mbuf of the current packet.
611 if (first_seg == NULL) {
613 first_seg->pkt_len = data_len;
614 first_seg->nb_segs = 1;
616 first_seg->pkt_len += data_len;
617 first_seg->nb_segs++;
618 last_seg->next = rxm;
622 * If this is not the last buffer of the received packet,
623 * update the pointer to the last mbuf of the current scattered
624 * packet and continue to parse the RX ring.
626 if (!(staterr & IGC_RXD_STAT_EOP)) {
632 * This is the last buffer of the received packet.
633 * If the CRC is not stripped by the hardware:
634 * - Subtract the CRC length from the total packet length.
635 * - If the last buffer only contains the whole CRC or a part
636 * of it, free the mbuf associated to the last buffer.
637 * If part of the CRC is also contained in the previous
638 * mbuf, subtract the length of that CRC part from the
639 * data length of the previous mbuf.
641 if (unlikely(rxq->crc_len > 0)) {
642 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
643 if (data_len <= RTE_ETHER_CRC_LEN) {
644 rte_pktmbuf_free_seg(rxm);
645 first_seg->nb_segs--;
646 last_seg->data_len = last_seg->data_len -
647 (RTE_ETHER_CRC_LEN - data_len);
648 last_seg->next = NULL;
650 rxm->data_len = (uint16_t)
651 (data_len - RTE_ETHER_CRC_LEN);
655 rx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);
658 * Store the mbuf address into the next entry of the array
659 * of returned packets.
661 rx_pkts[nb_rx++] = first_seg;
663 /* Setup receipt context for a new packet. */
666 rxq->rx_tail = rx_id;
669 * Save receive context.
671 rxq->pkt_first_seg = first_seg;
672 rxq->pkt_last_seg = last_seg;
675 * If the number of free RX descriptors is greater than the RX free
676 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
678 * Update the RDT with the value of the last processed RX descriptor
679 * minus 1, to guarantee that the RDT register is never equal to the
680 * RDH register, which creates a "full" ring situation from the
681 * hardware point of view...
683 nb_hold = nb_hold + rxq->nb_rx_hold;
684 if (nb_hold > rxq->rx_free_thresh) {
686 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
687 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
688 rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);
689 IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
692 rxq->nb_rx_hold = nb_hold;
697 igc_rx_queue_release_mbufs(struct igc_rx_queue *rxq)
701 if (rxq->sw_ring != NULL) {
702 for (i = 0; i < rxq->nb_rx_desc; i++) {
703 if (rxq->sw_ring[i].mbuf != NULL) {
704 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
705 rxq->sw_ring[i].mbuf = NULL;
712 igc_rx_queue_release(struct igc_rx_queue *rxq)
714 igc_rx_queue_release_mbufs(rxq);
715 rte_free(rxq->sw_ring);
719 void eth_igc_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
721 if (dev->data->rx_queues[qid])
722 igc_rx_queue_release(dev->data->rx_queues[qid]);
725 uint32_t eth_igc_rx_queue_count(void *rx_queue)
728 * Check the DD bit of one RX descriptor in every group of 4,
729 * to avoid checking too frequently and degrading performance.
732 #define IGC_RXQ_SCAN_INTERVAL 4
734 volatile union igc_adv_rx_desc *rxdp;
735 struct igc_rx_queue *rxq;
739 rxdp = &rxq->rx_ring[rxq->rx_tail];
741 while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
742 if (unlikely(!(rxdp->wb.upper.status_error &
745 desc += IGC_RXQ_SCAN_INTERVAL;
746 rxdp += IGC_RXQ_SCAN_INTERVAL;
748 rxdp = &rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc];
750 while (desc < rxq->nb_rx_desc &&
751 (rxdp->wb.upper.status_error & IGC_RXD_STAT_DD)) {
752 desc += IGC_RXQ_SCAN_INTERVAL;
753 rxdp += IGC_RXQ_SCAN_INTERVAL;
759 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
761 struct igc_rx_queue *rxq = rx_queue;
762 volatile uint32_t *status;
765 if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
768 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
769 return RTE_ETH_RX_DESC_UNAVAIL;
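/*
 * Offsets that fall into the held (not yet replenished) region were reported
 * as unavailable above; for the remaining descriptors the DD bit written back
 * by the hardware decides between DONE and AVAIL below.
 */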
771 desc = rxq->rx_tail + offset;
772 if (desc >= rxq->nb_rx_desc)
773 desc -= rxq->nb_rx_desc;
775 status = &rxq->rx_ring[desc].wb.upper.status_error;
776 if (*status & rte_cpu_to_le_32(IGC_RXD_STAT_DD))
777 return RTE_ETH_RX_DESC_DONE;
779 return RTE_ETH_RX_DESC_AVAIL;
783 igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)
785 struct igc_rx_entry *rxe = rxq->sw_ring;
789 /* Initialize software ring entries. */
790 for (i = 0; i < rxq->nb_rx_desc; i++) {
791 volatile union igc_adv_rx_desc *rxd;
792 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
795 PMD_DRV_LOG(ERR, "RX mbuf alloc failed, queue_id=%hu",
799 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
800 rxd = &rxq->rx_ring[i];
801 rxd->read.hdr_addr = 0;
802 rxd->read.pkt_addr = dma_addr;
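/*
 * One-buffer layout (no header split): hdr_addr stays zero and the whole
 * mbuf data buffer is handed to the hardware as the packet buffer, matching
 * the IGC_SRRCTL_DESCTYPE_ADV_ONEBUF descriptor type programmed in
 * igc_rx_init().
 */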
810 * RSS random key supplied in section 7.1.2.9.3 of the Intel I225 datasheet.
811 * Used as the default key.
813 static uint8_t default_rss_key[40] = {
814 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
815 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
816 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
817 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
818 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
822 igc_rss_disable(struct rte_eth_dev *dev)
824 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
827 mrqc = IGC_READ_REG(hw, IGC_MRQC);
828 mrqc &= ~IGC_MRQC_ENABLE_MASK;
829 IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
833 igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
835 uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
839 if (hash_key != NULL) {
842 /* Fill in RSS hash key */
843 for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
844 IGC_WRITE_REG_LE_VALUE(hw, IGC_RSSRK(i), hash_key[i]);
847 /* Set configured hashing protocols in MRQC register */
848 rss_hf = rss_conf->rss_hf;
849 mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
850 if (rss_hf & ETH_RSS_IPV4)
851 mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
852 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
853 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
854 if (rss_hf & ETH_RSS_IPV6)
855 mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
856 if (rss_hf & ETH_RSS_IPV6_EX)
857 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
858 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
859 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
860 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
861 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
862 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
863 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
864 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
865 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
866 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
867 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
868 IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
872 igc_rss_configure(struct rte_eth_dev *dev)
874 struct rte_eth_rss_conf rss_conf;
875 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
878 /* Fill in redirection table. */
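/*
 * Each RETA register packs sizeof(reta) one-byte queue indices; entries are
 * accumulated in the reta union and the register is written once per group,
 * spreading the RX queues round-robin across the IGC_RSS_RDT_SIZD-entry
 * table.
 */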
879 for (i = 0; i < IGC_RSS_RDT_SIZD; i++) {
880 union igc_rss_reta_reg reta;
881 uint16_t q_idx, reta_idx;
883 q_idx = (uint8_t)((dev->data->nb_rx_queues > 1) ?
884 i % dev->data->nb_rx_queues : 0);
885 reta_idx = i % sizeof(reta);
886 reta.bytes[reta_idx] = q_idx;
887 if (reta_idx == sizeof(reta) - 1)
888 IGC_WRITE_REG_LE_VALUE(hw,
889 IGC_RETA(i / sizeof(reta)), reta.dword);
893 * Configure the RSS key and the RSS protocols used to compute
894 * the RSS hash of input packets.
896 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
897 if (rss_conf.rss_key == NULL)
898 rss_conf.rss_key = default_rss_key;
899 igc_hw_rss_hash_set(hw, &rss_conf);
903 igc_del_rss_filter(struct rte_eth_dev *dev)
905 struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
907 if (rss_filter->enable) {
908 /* recover default RSS configuration */
909 igc_rss_configure(dev);
911 /* disable RSS logic and clear filter data */
912 igc_rss_disable(dev);
913 memset(rss_filter, 0, sizeof(*rss_filter));
916 PMD_DRV_LOG(ERR, "RSS filter does not exist!");
920 /* Initialize the filter structure from a struct rte_flow_action_rss */
922 igc_rss_conf_set(struct igc_rss_filter *out,
923 const struct rte_flow_action_rss *rss)
925 out->conf.func = rss->func;
926 out->conf.level = rss->level;
927 out->conf.types = rss->types;
929 if (rss->key_len == sizeof(out->key)) {
930 memcpy(out->key, rss->key, rss->key_len);
931 out->conf.key = out->key;
932 out->conf.key_len = rss->key_len;
934 out->conf.key = NULL;
935 out->conf.key_len = 0;
938 if (rss->queue_num <= IGC_RSS_RDT_SIZD) {
939 memcpy(out->queue, rss->queue,
940 sizeof(*out->queue) * rss->queue_num);
941 out->conf.queue = out->queue;
942 out->conf.queue_num = rss->queue_num;
944 out->conf.queue = NULL;
945 out->conf.queue_num = 0;
950 igc_add_rss_filter(struct rte_eth_dev *dev, struct igc_rss_filter *rss)
952 struct rte_eth_rss_conf rss_conf = {
953 .rss_key = rss->conf.key_len ?
954 (void *)(uintptr_t)rss->conf.key : NULL,
955 .rss_key_len = rss->conf.key_len,
956 .rss_hf = rss->conf.types,
958 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
959 struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
962 /* check that the RSS type is valid */
963 if ((rss_conf.rss_hf & IGC_RSS_OFFLOAD_ALL) == 0) {
965 "RSS type (0x%" PRIx64 ") is invalid, only 0x%" PRIx64
966 " is supported", rss_conf.rss_hf,
967 (uint64_t)IGC_RSS_OFFLOAD_ALL);
971 /* check queue count is not zero */
972 if (!rss->conf.queue_num) {
973 PMD_DRV_LOG(ERR, "Queue number should not be 0!");
977 /* check queue id is valid */
978 for (i = 0; i < rss->conf.queue_num; i++)
979 if (rss->conf.queue[i] >= dev->data->nb_rx_queues) {
980 PMD_DRV_LOG(ERR, "Queue id %u is invalid!",
985 /* only one filter is supported */
986 if (rss_filter->enable) {
987 PMD_DRV_LOG(ERR, "Only one RSS filter is supported!");
990 rss_filter->enable = 1;
992 igc_rss_conf_set(rss_filter, &rss->conf);
994 /* Fill in redirection table. */
995 for (i = 0, j = 0; i < IGC_RSS_RDT_SIZD; i++, j++) {
996 union igc_rss_reta_reg reta;
997 uint16_t q_idx, reta_idx;
999 if (j == rss->conf.queue_num)
1001 q_idx = rss->conf.queue[j];
1002 reta_idx = i % sizeof(reta);
1003 reta.bytes[reta_idx] = q_idx;
1004 if (reta_idx == sizeof(reta) - 1)
1005 IGC_WRITE_REG_LE_VALUE(hw,
1006 IGC_RETA(i / sizeof(reta)), reta.dword);
1009 if (rss_conf.rss_key == NULL)
1010 rss_conf.rss_key = default_rss_key;
1011 igc_hw_rss_hash_set(hw, &rss_conf);
1016 igc_clear_rss_filter(struct rte_eth_dev *dev)
1018 struct igc_rss_filter *rss_filter = IGC_DEV_PRIVATE_RSS_FILTER(dev);
1020 if (!rss_filter->enable)
1023 /* recover default RSS configuration */
1024 igc_rss_configure(dev);
1026 /* disable RSS logic and clear filter data */
1027 igc_rss_disable(dev);
1028 memset(rss_filter, 0, sizeof(*rss_filter));
1032 igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
1034 if (RTE_ETH_DEV_SRIOV(dev).active) {
1035 PMD_DRV_LOG(ERR, "SRIOV unsupported!");
1039 switch (dev->data->dev_conf.rxmode.mq_mode) {
1041 igc_rss_configure(dev);
1043 case ETH_MQ_RX_NONE:
1045 * configure the RSS registers with their default values,
1046 * then disable the RSS logic
1048 igc_rss_configure(dev);
1049 igc_rss_disable(dev);
1052 PMD_DRV_LOG(ERR, "rx mode(%d) not supported!",
1053 dev->data->dev_conf.rxmode.mq_mode);
1060 igc_rx_init(struct rte_eth_dev *dev)
1062 struct igc_rx_queue *rxq;
1063 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1064 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
1065 uint32_t max_rx_pktlen;
1069 uint16_t rctl_bsize;
1073 dev->rx_pkt_burst = igc_recv_pkts;
1076 * Make sure receives are disabled while setting
1077 * up the descriptor ring.
1079 rctl = IGC_READ_REG(hw, IGC_RCTL);
1080 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
1082 /* Configure support of jumbo frames, if any. */
1083 if (dev->data->mtu > RTE_ETHER_MTU)
1084 rctl |= IGC_RCTL_LPE;
1086 rctl &= ~IGC_RCTL_LPE;
1088 max_rx_pktlen = dev->data->mtu + IGC_ETH_OVERHEAD;
1090 * Set the maximum packet length by default; it may be updated
1091 * later when dual VLAN is enabled or disabled.
1093 IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pktlen);
1095 /* Configure and enable each RX queue. */
1097 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1102 rxq = dev->data->rx_queues[i];
1105 /* Allocate buffers for descriptor rings and set up queue */
1106 ret = igc_alloc_rx_queue_mbufs(rxq);
1111 * Reset crc_len in case it was changed after queue setup by a
1114 rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
1115 RTE_ETHER_CRC_LEN : 0;
1117 bus_addr = rxq->rx_ring_phys_addr;
1118 IGC_WRITE_REG(hw, IGC_RDLEN(rxq->reg_idx),
1120 sizeof(union igc_adv_rx_desc));
1121 IGC_WRITE_REG(hw, IGC_RDBAH(rxq->reg_idx),
1122 (uint32_t)(bus_addr >> 32));
1123 IGC_WRITE_REG(hw, IGC_RDBAL(rxq->reg_idx),
1124 (uint32_t)bus_addr);
1126 /* set descriptor configuration */
1127 srrctl = IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
1129 srrctl |= (uint32_t)(RTE_PKTMBUF_HEADROOM / 64) <<
1130 IGC_SRRCTL_BSIZEHEADER_SHIFT;
1132 * Configure RX buffer size.
1134 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1135 RTE_PKTMBUF_HEADROOM);
1136 if (buf_size >= 1024) {
1138 * Configure the BSIZEPACKET field of the SRRCTL
1139 * register of the queue.
1140 * Value is in 1 KB resolution, from 1 KB to 16 KB.
1141 * If this field is equal to 0b, then RCTL.BSIZE
1142 * determines the RX packet buffer size.
1145 srrctl |= ((buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT) &
1146 IGC_SRRCTL_BSIZEPKT_MASK);
1147 buf_size = (uint16_t)((srrctl &
1148 IGC_SRRCTL_BSIZEPKT_MASK) <<
1149 IGC_SRRCTL_BSIZEPKT_SHIFT);
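/*
 * Recompute buf_size from the value actually programmed into SRRCTL
 * (1 KB granularity), so that the scattered-RX decision below is based on
 * the buffer size the hardware will enforce rather than the raw mempool
 * data room.
 */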
1151 /* It adds dual VLAN length for supporting dual VLAN */
1152 if (max_rx_pktlen > buf_size)
1153 dev->data->scattered_rx = 1;
1156 * Use BSIZE field of the device RCTL register.
1158 if (rctl_bsize == 0 || rctl_bsize > buf_size)
1159 rctl_bsize = buf_size;
1160 dev->data->scattered_rx = 1;
1163 /* Set whether packets are dropped when no descriptors are available */
1165 srrctl |= IGC_SRRCTL_DROP_EN;
1167 IGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl);
1169 /* Enable this RX queue. */
1170 rxdctl = IGC_RXDCTL_QUEUE_ENABLE;
1171 rxdctl |= ((uint32_t)rxq->pthresh << IGC_RXDCTL_PTHRESH_SHIFT) &
1172 IGC_RXDCTL_PTHRESH_MSK;
1173 rxdctl |= ((uint32_t)rxq->hthresh << IGC_RXDCTL_HTHRESH_SHIFT) &
1174 IGC_RXDCTL_HTHRESH_MSK;
1175 rxdctl |= ((uint32_t)rxq->wthresh << IGC_RXDCTL_WTHRESH_SHIFT) &
1176 IGC_RXDCTL_WTHRESH_MSK;
1177 IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
1180 if (offloads & DEV_RX_OFFLOAD_SCATTER)
1181 dev->data->scattered_rx = 1;
1183 if (dev->data->scattered_rx) {
1184 PMD_DRV_LOG(DEBUG, "forcing scatter mode");
1185 dev->rx_pkt_burst = igc_recv_scattered_pkts;
1188 * Setup BSIZE field of RCTL register, if needed.
1189 * Buffer sizes >= 1024 are not [supposed to be] setup in the RCTL
1190 * register, since the code above configures the SRRCTL register of
1191 * the RX queue in such a case.
1192 * All configurable sizes are:
1193 * 16384: rctl |= (IGC_RCTL_SZ_16384 | IGC_RCTL_BSEX);
1194 * 8192: rctl |= (IGC_RCTL_SZ_8192 | IGC_RCTL_BSEX);
1195 * 4096: rctl |= (IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX);
1196 * 2048: rctl |= IGC_RCTL_SZ_2048;
1197 * 1024: rctl |= IGC_RCTL_SZ_1024;
1198 * 512: rctl |= IGC_RCTL_SZ_512;
1199 * 256: rctl |= IGC_RCTL_SZ_256;
1201 if (rctl_bsize > 0) {
1202 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1203 rctl |= IGC_RCTL_SZ_512;
1204 else /* 256 <= buf_size < 512 - use 256 */
1205 rctl |= IGC_RCTL_SZ_256;
1209 * Configure RSS if device configured with multiple RX queues.
1211 igc_dev_mq_rx_configure(dev);
1213 /* Update the rctl since igc_dev_mq_rx_configure may change its value */
1214 rctl |= IGC_READ_REG(hw, IGC_RCTL);
1217 * Setup the Checksum Register.
1218 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1220 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
1221 rxcsum |= IGC_RXCSUM_PCSD;
1223 /* Enable both L3/L4 rx checksum offload */
1224 if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
1225 rxcsum |= IGC_RXCSUM_IPOFL;
1227 rxcsum &= ~IGC_RXCSUM_IPOFL;
1230 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
1231 rxcsum |= IGC_RXCSUM_TUOFL;
1232 offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
1234 rxcsum &= ~IGC_RXCSUM_TUOFL;
1237 if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
1238 rxcsum |= IGC_RXCSUM_CRCOFL;
1240 rxcsum &= ~IGC_RXCSUM_CRCOFL;
1242 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
1244 /* Setup the Receive Control Register. */
1245 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1246 rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1248 rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
1250 rctl &= ~IGC_RCTL_MO_MSK;
1251 rctl &= ~IGC_RCTL_LBM_MSK;
1252 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
1254 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
1256 if (dev->data->dev_conf.lpbk_mode == 1)
1257 rctl |= IGC_RCTL_LBM_MAC;
1259 rctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |
1260 IGC_RCTL_PSP | IGC_RCTL_PMCF);
1262 /* Make sure VLAN Filters are off. */
1263 rctl &= ~IGC_RCTL_VFE;
1264 /* Don't store bad packets. */
1265 rctl &= ~IGC_RCTL_SBP;
1267 /* Enable Receives. */
1268 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1271 * Setup the HW Rx Head and Tail Descriptor Pointers.
1272 * This needs to be done after enable.
1274 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1277 rxq = dev->data->rx_queues[i];
1278 IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);
1279 IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx), rxq->nb_rx_desc - 1);
1281 dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->reg_idx));
1282 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1283 dvmolr |= IGC_DVMOLR_STRVLAN;
1285 dvmolr &= ~IGC_DVMOLR_STRVLAN;
1287 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1288 dvmolr &= ~IGC_DVMOLR_STRCRC;
1290 dvmolr |= IGC_DVMOLR_STRCRC;
1292 IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
1299 igc_reset_rx_queue(struct igc_rx_queue *rxq)
1301 static const union igc_adv_rx_desc zeroed_desc = { {0} };
1304 /* Zero out HW ring memory */
1305 for (i = 0; i < rxq->nb_rx_desc; i++)
1306 rxq->rx_ring[i] = zeroed_desc;
1309 rxq->pkt_first_seg = NULL;
1310 rxq->pkt_last_seg = NULL;
1314 eth_igc_rx_queue_setup(struct rte_eth_dev *dev,
1317 unsigned int socket_id,
1318 const struct rte_eth_rxconf *rx_conf,
1319 struct rte_mempool *mp)
1321 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1322 const struct rte_memzone *rz;
1323 struct igc_rx_queue *rxq;
1327 * Validate the number of receive descriptors.
1328 * It must not exceed the hardware maximum and must be a multiple
1329 * of IGC_RX_DESCRIPTOR_MULTIPLE.
1331 if (nb_desc % IGC_RX_DESCRIPTOR_MULTIPLE != 0 ||
1332 nb_desc > IGC_MAX_RXD || nb_desc < IGC_MIN_RXD) {
1334 "RX descriptor must be multiple of %u(cur: %u) and between %u and %u",
1335 IGC_RX_DESCRIPTOR_MULTIPLE, nb_desc,
1336 IGC_MIN_RXD, IGC_MAX_RXD);
1340 /* Free memory prior to re-allocation if needed */
1341 if (dev->data->rx_queues[queue_idx] != NULL) {
1342 igc_rx_queue_release(dev->data->rx_queues[queue_idx]);
1343 dev->data->rx_queues[queue_idx] = NULL;
1346 /* First allocate the RX queue data structure. */
1347 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igc_rx_queue),
1348 RTE_CACHE_LINE_SIZE);
1351 rxq->offloads = rx_conf->offloads;
1353 rxq->nb_rx_desc = nb_desc;
1354 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1355 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1356 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1357 rxq->drop_en = rx_conf->rx_drop_en;
1358 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1359 rxq->queue_id = queue_idx;
1360 rxq->reg_idx = queue_idx;
1361 rxq->port_id = dev->data->port_id;
1364 * Allocate RX ring hardware descriptors. A memzone large enough to
1365 * handle the maximum ring size is allocated in order to allow for
1366 * resizing in later calls to the queue setup function.
1368 size = sizeof(union igc_adv_rx_desc) * IGC_MAX_RXD;
1369 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1370 IGC_ALIGN, socket_id);
1372 igc_rx_queue_release(rxq);
1375 rxq->rdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDT(rxq->reg_idx));
1376 rxq->rdh_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDH(rxq->reg_idx));
1377 rxq->rx_ring_phys_addr = rz->iova;
1378 rxq->rx_ring = (union igc_adv_rx_desc *)rz->addr;
1380 /* Allocate software ring. */
1381 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1382 sizeof(struct igc_rx_entry) * nb_desc,
1383 RTE_CACHE_LINE_SIZE);
1384 if (rxq->sw_ring == NULL) {
1385 igc_rx_queue_release(rxq);
1389 PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1390 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1392 dev->data->rx_queues[queue_idx] = rxq;
1393 igc_reset_rx_queue(rxq);
1398 /* Prepare packets for transmission */
1400 eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1406 for (i = 0; i < nb_pkts; i++) {
1409 /* Check some limitations for TSO in hardware */
1410 if (m->ol_flags & IGC_TX_OFFLOAD_SEG)
1411 if (m->tso_segsz > IGC_TSO_MAX_MSS ||
1412 m->l2_len + m->l3_len + m->l4_len >
1413 IGC_TSO_MAX_HDRLEN) {
1418 if (m->ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK) {
1419 rte_errno = ENOTSUP;
1423 #ifdef RTE_ETHDEV_DEBUG_TX
1424 ret = rte_validate_tx_offload(m);
1430 ret = rte_net_intel_cksum_prepare(m);
1441 * There are some hardware limitations for TCP segmentation offload.
1442 * Check whether the parameters are valid.
1444 static inline uint64_t
1445 check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para)
1447 if (!(ol_req & IGC_TX_OFFLOAD_SEG))
1449 if (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +
1450 ol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {
1451 ol_req &= ~IGC_TX_OFFLOAD_SEG;
1452 ol_req |= PKT_TX_TCP_CKSUM;
1458 * Check which hardware context can be used. Use the existing match
1459 * or create a new context descriptor.
1461 static inline uint32_t
1462 what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,
1463 union igc_tx_offload tx_offload)
1465 uint32_t curr = txq->ctx_curr;
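/*
 * The hardware provides two context slots (IGC_CTX_0/IGC_CTX_1). The driver
 * caches what was last programmed into each slot and reuses a slot when both
 * the offload flags and the masked offload data match; on a miss a value of
 * at least IGC_CTX_NUM is returned (see the "new_ctx >= IGC_CTX_NUM" check
 * in igc_xmit_pkts()) so the caller writes a new context descriptor.
 */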
1467 /* If it matches the current context */
1468 if (likely(txq->ctx_cache[curr].flags == flags &&
1469 txq->ctx_cache[curr].tx_offload.data ==
1470 (txq->ctx_cache[curr].tx_offload_mask.data &
1471 tx_offload.data))) {
1475 /* There are two contexts in total; check whether it matches the second one */
1477 if (likely(txq->ctx_cache[curr].flags == flags &&
1478 txq->ctx_cache[curr].tx_offload.data ==
1479 (txq->ctx_cache[curr].tx_offload_mask.data &
1480 tx_offload.data))) {
1481 txq->ctx_curr = curr;
1485 /* Mismatch, create new one */
1490 * This is a separate function; there may be optimization opportunities here.
1491 * Rework is required to work with the pre-defined values.
1494 igc_set_xmit_ctx(struct igc_tx_queue *txq,
1495 volatile struct igc_adv_tx_context_desc *ctx_txd,
1496 uint64_t ol_flags, union igc_tx_offload tx_offload)
1498 uint32_t type_tucmd_mlhl;
1499 uint32_t mss_l4len_idx;
1501 uint32_t vlan_macip_lens;
1502 union igc_tx_offload tx_offload_mask;
1504 /* Use the previous context */
1506 ctx_curr = txq->ctx_curr;
1508 tx_offload_mask.data = 0;
1509 type_tucmd_mlhl = 0;
1511 /* Specify which HW CTX to upload. */
1512 mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
1514 if (ol_flags & PKT_TX_VLAN_PKT)
1515 tx_offload_mask.vlan_tci = 0xffff;
1517 /* check whether TCP segmentation is required for this packet */
1518 if (ol_flags & IGC_TX_OFFLOAD_SEG) {
1519 /* implies IP cksum in IPv4 */
1520 if (ol_flags & PKT_TX_IP_CKSUM)
1521 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |
1522 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1524 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |
1525 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1527 if (ol_flags & PKT_TX_TCP_SEG)
1528 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
1530 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
1532 tx_offload_mask.data |= TX_TSO_CMP_MASK;
1533 mss_l4len_idx |= (uint32_t)tx_offload.tso_segsz <<
1534 IGC_ADVTXD_MSS_SHIFT;
1535 mss_l4len_idx |= (uint32_t)tx_offload.l4_len <<
1536 IGC_ADVTXD_L4LEN_SHIFT;
1537 } else { /* no TSO, check if hardware checksum is needed */
1538 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1539 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
1541 if (ol_flags & PKT_TX_IP_CKSUM)
1542 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;
1544 switch (ol_flags & PKT_TX_L4_MASK) {
1545 case PKT_TX_TCP_CKSUM:
1546 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |
1547 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1548 mss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr)
1549 << IGC_ADVTXD_L4LEN_SHIFT;
1551 case PKT_TX_UDP_CKSUM:
1552 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |
1553 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1554 mss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr)
1555 << IGC_ADVTXD_L4LEN_SHIFT;
1557 case PKT_TX_SCTP_CKSUM:
1558 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |
1559 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1560 mss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr)
1561 << IGC_ADVTXD_L4LEN_SHIFT;
1564 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_RSV |
1565 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1570 txq->ctx_cache[ctx_curr].flags = ol_flags;
1571 txq->ctx_cache[ctx_curr].tx_offload.data =
1572 tx_offload_mask.data & tx_offload.data;
1573 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
1575 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
1576 vlan_macip_lens = (uint32_t)tx_offload.data;
1577 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
1578 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
1579 ctx_txd->u.launch_time = 0;
1582 static inline uint32_t
1583 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
1586 static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
1587 static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
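/*
 * The "!= 0" tests below evaluate to 0 or 1 and are used directly as table
 * indices, selecting the VLAN/TSE command bits without branching.
 * tx_desc_cksum_flags_to_olinfo() uses the same technique.
 */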
1588 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
1589 cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
1593 static inline uint32_t
1594 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
1596 static const uint32_t l4_olinfo[2] = {0, IGC_ADVTXD_POPTS_TXSM};
1597 static const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};
1600 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
1601 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
1602 tmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
1607 igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1609 struct igc_tx_queue * const txq = tx_queue;
1610 struct igc_tx_entry * const sw_ring = txq->sw_ring;
1611 struct igc_tx_entry *txe, *txn;
1612 volatile union igc_adv_tx_desc * const txr = txq->tx_ring;
1613 volatile union igc_adv_tx_desc *txd;
1614 struct rte_mbuf *tx_pkt;
1615 struct rte_mbuf *m_seg;
1616 uint64_t buf_dma_addr;
1617 uint32_t olinfo_status;
1618 uint32_t cmd_type_len;
1627 uint32_t new_ctx = 0;
1628 union igc_tx_offload tx_offload = {0};
1630 tx_id = txq->tx_tail;
1631 txe = &sw_ring[tx_id];
1633 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1634 tx_pkt = *tx_pkts++;
1635 pkt_len = tx_pkt->pkt_len;
1637 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1640 * The number of descriptors that must be allocated for a
1641 * packet is the number of segments of that packet, plus 1
1642 * Context Descriptor for the VLAN Tag Identifier, if any.
1643 * Determine the last TX descriptor to allocate in the TX ring
1644 * for the packet, starting from the current position (tx_id)
1647 tx_last = (uint16_t)(tx_id + tx_pkt->nb_segs - 1);
1649 ol_flags = tx_pkt->ol_flags;
1650 tx_ol_req = ol_flags & IGC_TX_OFFLOAD_MASK;
1652 /* If a context descriptor needs to be built. */
1654 tx_offload.l2_len = tx_pkt->l2_len;
1655 tx_offload.l3_len = tx_pkt->l3_len;
1656 tx_offload.l4_len = tx_pkt->l4_len;
1657 tx_offload.vlan_tci = tx_pkt->vlan_tci;
1658 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1659 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
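/*
 * check_tso_para() may downgrade a TSO request whose MSS or total header
 * length exceeds the hardware limits to a plain TCP checksum request, so
 * the (possibly adjusted) flags returned here are used from this point on.
 */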
1661 new_ctx = what_advctx_update(txq, tx_ol_req,
1663 /* Only allocate a context descriptor if required */
1664 new_ctx = (new_ctx >= IGC_CTX_NUM);
1665 tx_last = (uint16_t)(tx_last + new_ctx);
1667 if (tx_last >= txq->nb_tx_desc)
1668 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1671 "port_id=%u queue_id=%u pktlen=%u tx_first=%u tx_last=%u",
1672 txq->port_id, txq->queue_id, pkt_len, tx_id, tx_last);
1675 * Check if there are enough free descriptors in the TX ring
1676 * to transmit the next packet.
1677 * This operation is based on the two following rules:
1679 * 1- Only check that the last needed TX descriptor can be
1680 * allocated (by construction, if that descriptor is free,
1681 * all intermediate ones are also free).
1683 * For this purpose, the index of the last TX descriptor
1684 * used for a packet (the "last descriptor" of a packet)
1685 * is recorded in the TX entries (the last one included)
1686 * that are associated with all TX descriptors allocated
1689 * 2- Avoid allocating the last free TX descriptor of the
1690 * ring, in order to never set the TDT register with the
1691 * same value stored in parallel by the NIC in the TDH
1692 * register, which would make the TX engine of the NIC enter
1693 * a deadlock situation.
1695 * By extension, avoid allocating a free descriptor that
1696 * belongs to the last set of free descriptors allocated
1697 * to the same packet previously transmitted.
1701 * The "last descriptor" of the previously sent packet, if any,
1702 * which used the last descriptor to allocate.
1704 tx_end = sw_ring[tx_last].last_id;
1707 * The next descriptor following that "last descriptor" in the
1710 tx_end = sw_ring[tx_end].next_id;
1713 * The "last descriptor" associated with that next descriptor.
1715 tx_end = sw_ring[tx_end].last_id;
1718 * Check that this descriptor is free.
1720 if (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) {
1727 * Set common flags of all TX Data Descriptors.
1729 * The following bits must be set in all Data Descriptors:
1730 * - IGC_ADVTXD_DTYP_DATA
1731 * - IGC_ADVTXD_DCMD_DEXT
1733 * The following bits must be set in the first Data Descriptor
1734 * and are ignored in the other ones:
1735 * - IGC_ADVTXD_DCMD_IFCS
1736 * - IGC_ADVTXD_MAC_1588
1737 * - IGC_ADVTXD_DCMD_VLE
1739 * The following bits must only be set in the last Data
1743 * The following bits can be set in any Data Descriptor, but
1744 * are only set in the last Data Descriptor:
1747 cmd_type_len = txq->txd_type |
1748 IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT;
1749 if (tx_ol_req & IGC_TX_OFFLOAD_SEG)
1750 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len +
1752 olinfo_status = (pkt_len << IGC_ADVTXD_PAYLEN_SHIFT);
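/*
 * For TSO the PAYLEN field carries only the L4 payload length, which is why
 * the L2/L3/L4 header lengths were subtracted from pkt_len just above;
 * non-TSO packets report the full packet length.
 */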
1755 * Timer 0 should be used for packet timestamping;
1756 * sample the packet timestamp into register 0.
1758 if (ol_flags & PKT_TX_IEEE1588_TMST)
1759 cmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;
1762 /* Setup TX Advanced context descriptor if required */
1764 volatile struct igc_adv_tx_context_desc *
1765 ctx_txd = (volatile struct
1766 igc_adv_tx_context_desc *)&txr[tx_id];
1768 txn = &sw_ring[txe->next_id];
1769 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1771 if (txe->mbuf != NULL) {
1772 rte_pktmbuf_free_seg(txe->mbuf);
1776 igc_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
1779 txe->last_id = tx_last;
1780 tx_id = txe->next_id;
1784 /* Setup the TX Advanced Data Descriptor */
1786 tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
1788 tx_desc_cksum_flags_to_olinfo(tx_ol_req);
1789 olinfo_status |= (uint32_t)txq->ctx_curr <<
1790 IGC_ADVTXD_IDX_SHIFT;
1795 txn = &sw_ring[txe->next_id];
1796 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1800 if (txe->mbuf != NULL)
1801 rte_pktmbuf_free_seg(txe->mbuf);
1804 /* Set up transmit descriptor */
1805 slen = (uint16_t)m_seg->data_len;
1806 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1807 txd->read.buffer_addr =
1808 rte_cpu_to_le_64(buf_dma_addr);
1809 txd->read.cmd_type_len =
1810 rte_cpu_to_le_32(cmd_type_len | slen);
1811 txd->read.olinfo_status =
1812 rte_cpu_to_le_32(olinfo_status);
1813 txe->last_id = tx_last;
1814 tx_id = txe->next_id;
1816 m_seg = m_seg->next;
1817 } while (m_seg != NULL);
1820 * The last packet data descriptor needs End Of Packet (EOP)
1821 * and Report Status (RS).
1823 txd->read.cmd_type_len |=
1824 rte_cpu_to_le_32(IGC_TXD_CMD_EOP | IGC_TXD_CMD_RS);
1830 * Set the Transmit Descriptor Tail (TDT).
1832 IGC_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
1833 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1834 txq->port_id, txq->queue_id, tx_id, nb_tx);
1835 txq->tx_tail = tx_id;
1840 int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset)
1842 struct igc_tx_queue *txq = tx_queue;
1843 volatile uint32_t *status;
1846 if (unlikely(!txq || offset >= txq->nb_tx_desc))
1849 desc = txq->tx_tail + offset;
1850 if (desc >= txq->nb_tx_desc)
1851 desc -= txq->nb_tx_desc;
1853 status = &txq->tx_ring[desc].wb.status;
1854 if (*status & rte_cpu_to_le_32(IGC_TXD_STAT_DD))
1855 return RTE_ETH_TX_DESC_DONE;
1857 return RTE_ETH_TX_DESC_FULL;
1861 igc_tx_queue_release_mbufs(struct igc_tx_queue *txq)
1865 if (txq->sw_ring != NULL) {
1866 for (i = 0; i < txq->nb_tx_desc; i++) {
1867 if (txq->sw_ring[i].mbuf != NULL) {
1868 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1869 txq->sw_ring[i].mbuf = NULL;
1876 igc_tx_queue_release(struct igc_tx_queue *txq)
1878 igc_tx_queue_release_mbufs(txq);
1879 rte_free(txq->sw_ring);
1883 void eth_igc_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
1885 if (dev->data->tx_queues[qid])
1886 igc_tx_queue_release(dev->data->tx_queues[qid]);
1890 igc_reset_tx_queue_stat(struct igc_tx_queue *txq)
1895 memset((void *)&txq->ctx_cache, 0,
1896 IGC_CTX_NUM * sizeof(struct igc_advctx_info));
1900 igc_reset_tx_queue(struct igc_tx_queue *txq)
1902 struct igc_tx_entry *txe = txq->sw_ring;
1905 /* Initialize ring entries */
1906 prev = (uint16_t)(txq->nb_tx_desc - 1);
1907 for (i = 0; i < txq->nb_tx_desc; i++) {
1908 volatile union igc_adv_tx_desc *txd = &txq->tx_ring[i];
1910 txd->wb.status = IGC_TXD_STAT_DD;
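/*
 * Marking every descriptor as "done" lets the transmit path treat the whole
 * ring as free right after reset; the software ring entries are then linked
 * into a circular list through their next_id fields.
 */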
1913 txe[prev].next_id = i;
1917 txq->txd_type = IGC_ADVTXD_DTYP_DATA;
1918 igc_reset_tx_queue_stat(txq);
1922 * Clear all RX/TX queues.
1925 igc_dev_clear_queues(struct rte_eth_dev *dev)
1928 struct igc_tx_queue *txq;
1929 struct igc_rx_queue *rxq;
1931 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1932 txq = dev->data->tx_queues[i];
1934 igc_tx_queue_release_mbufs(txq);
1935 igc_reset_tx_queue(txq);
1939 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1940 rxq = dev->data->rx_queues[i];
1942 igc_rx_queue_release_mbufs(rxq);
1943 igc_reset_rx_queue(rxq);
1948 int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1949 uint16_t nb_desc, unsigned int socket_id,
1950 const struct rte_eth_txconf *tx_conf)
1952 const struct rte_memzone *tz;
1953 struct igc_tx_queue *txq;
1957 if (nb_desc % IGC_TX_DESCRIPTOR_MULTIPLE != 0 ||
1958 nb_desc > IGC_MAX_TXD || nb_desc < IGC_MIN_TXD) {
1960 "TX-descriptor must be a multiple of %u and between %u and %u, cur: %u",
1961 IGC_TX_DESCRIPTOR_MULTIPLE,
1962 IGC_MAX_TXD, IGC_MIN_TXD, nb_desc);
1966 hw = IGC_DEV_PRIVATE_HW(dev);
1969 * The tx_free_thresh and tx_rs_thresh values are not used in the 2.5G
1972 if (tx_conf->tx_free_thresh != 0)
1974 "The tx_free_thresh parameter is not used for the 2.5G driver");
1975 if (tx_conf->tx_rs_thresh != 0)
1977 "The tx_rs_thresh parameter is not used for the 2.5G driver");
1978 if (tx_conf->tx_thresh.wthresh == 0)
1980 "To improve 2.5G driver performance, consider setting the TX WTHRESH value to 4, 8, or 16.");
1982 /* Free memory prior to re-allocation if needed */
1983 if (dev->data->tx_queues[queue_idx] != NULL) {
1984 igc_tx_queue_release(dev->data->tx_queues[queue_idx]);
1985 dev->data->tx_queues[queue_idx] = NULL;
1988 /* First allocate the tx queue data structure */
1989 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igc_tx_queue),
1990 RTE_CACHE_LINE_SIZE);
1995 * Allocate TX ring hardware descriptors. A memzone large enough to
1996 * handle the maximum ring size is allocated in order to allow for
1997 * resizing in later calls to the queue setup function.
1999 size = sizeof(union igc_adv_tx_desc) * IGC_MAX_TXD;
2000 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
2001 IGC_ALIGN, socket_id);
2003 igc_tx_queue_release(txq);
2007 txq->nb_tx_desc = nb_desc;
2008 txq->pthresh = tx_conf->tx_thresh.pthresh;
2009 txq->hthresh = tx_conf->tx_thresh.hthresh;
2010 txq->wthresh = tx_conf->tx_thresh.wthresh;
2012 txq->queue_id = queue_idx;
2013 txq->reg_idx = queue_idx;
2014 txq->port_id = dev->data->port_id;
2016 txq->tdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_TDT(txq->reg_idx));
2017 txq->tx_ring_phys_addr = tz->iova;
2019 txq->tx_ring = (union igc_adv_tx_desc *)tz->addr;
2020 /* Allocate software ring */
2021 txq->sw_ring = rte_zmalloc("txq->sw_ring",
2022 sizeof(struct igc_tx_entry) * nb_desc,
2023 RTE_CACHE_LINE_SIZE);
2024 if (txq->sw_ring == NULL) {
2025 igc_tx_queue_release(txq);
2028 PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2029 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2031 igc_reset_tx_queue(txq);
2032 dev->tx_pkt_burst = igc_xmit_pkts;
2033 dev->tx_pkt_prepare = &eth_igc_prep_pkts;
2034 dev->data->tx_queues[queue_idx] = txq;
2035 txq->offloads = tx_conf->offloads;
2041 eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
2043 struct igc_tx_queue *txq = txqueue;
2044 struct igc_tx_entry *sw_ring;
2045 volatile union igc_adv_tx_desc *txr;
2046 uint16_t tx_first; /* First segment analyzed. */
2047 uint16_t tx_id; /* Current segment being processed. */
2048 uint16_t tx_last; /* Last segment in the current packet. */
2049 uint16_t tx_next; /* First segment of the next packet. */
2056 sw_ring = txq->sw_ring;
2060 * tx_tail is the last sent packet on the sw_ring. Go to the end
2061 * of that packet (the last segment in the packet chain) and
2062 * then the next segment will be the start of the oldest segment
2063 * in the sw_ring. This is the first packet that will be
2064 * attempted to be freed.
2067 /* Get last segment in most recently added packet. */
2068 tx_first = sw_ring[txq->tx_tail].last_id;
2070 /* Get the next segment, which is the oldest segment in ring. */
2071 tx_first = sw_ring[tx_first].next_id;
2073 /* Set the current index to the first. */
2077 * Loop through each packet. For each packet, verify that an
2078 * mbuf exists and that the last segment is free. If so, free
2082 tx_last = sw_ring[tx_id].last_id;
2084 if (sw_ring[tx_last].mbuf) {
2085 if (!(txr[tx_last].wb.status &
2086 rte_cpu_to_le_32(IGC_TXD_STAT_DD)))
2089 /* Get the start of the next packet. */
2090 tx_next = sw_ring[tx_last].next_id;
2093 * Loop through all segments in a
2097 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
2098 sw_ring[tx_id].mbuf = NULL;
2099 sw_ring[tx_id].last_id = tx_id;
2101 /* Move to the next segment. */
2102 tx_id = sw_ring[tx_id].next_id;
2103 } while (tx_id != tx_next);
2106 * Increment the number of packets
2110 if (unlikely(count == free_cnt))
2114 * There are multiple reasons to be here:
2115 * 1) All the packets on the ring have been
2116 * freed - tx_id is equal to tx_first
2117 * and some packets have been freed.
2119 * 2) The interface has not sent a ring's worth of
2120 * packets yet, so the segment after the tail is
2121 * still empty. Or a previous call to this
2122 * function freed some of the segments but
2123 * not all of them, so there is a hole in the list.
2124 * Hopefully this is a rare case.
2125 * - Walk the list and find the next mbuf. If
2126 * there isn't one, then done.
2128 if (likely(tx_id == tx_first && count != 0))
2132 * Walk the list and find the next mbuf, if any.
2135 /* Move to the next segment. */
2136 tx_id = sw_ring[tx_id].next_id;
2138 if (sw_ring[tx_id].mbuf)
2141 } while (tx_id != tx_first);
2144 * Determine why the previous loop bailed. If there
2145 * is no mbuf, we are done.
2147 if (sw_ring[tx_id].mbuf == NULL)
2156 igc_tx_init(struct rte_eth_dev *dev)
2158 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2163 /* Setup the Base and Length of the Tx Descriptor Rings. */
2164 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2165 struct igc_tx_queue *txq = dev->data->tx_queues[i];
2166 uint64_t bus_addr = txq->tx_ring_phys_addr;
2168 IGC_WRITE_REG(hw, IGC_TDLEN(txq->reg_idx),
2170 sizeof(union igc_adv_tx_desc));
2171 IGC_WRITE_REG(hw, IGC_TDBAH(txq->reg_idx),
2172 (uint32_t)(bus_addr >> 32));
2173 IGC_WRITE_REG(hw, IGC_TDBAL(txq->reg_idx),
2174 (uint32_t)bus_addr);
2176 /* Setup the HW Tx Head and Tail descriptor pointers. */
2177 IGC_WRITE_REG(hw, IGC_TDT(txq->reg_idx), 0);
2178 IGC_WRITE_REG(hw, IGC_TDH(txq->reg_idx), 0);
2180 /* Setup Transmit threshold registers. */
2181 txdctl = ((uint32_t)txq->pthresh << IGC_TXDCTL_PTHRESH_SHIFT) &
2182 IGC_TXDCTL_PTHRESH_MSK;
2183 txdctl |= ((uint32_t)txq->hthresh << IGC_TXDCTL_HTHRESH_SHIFT) &
2184 IGC_TXDCTL_HTHRESH_MSK;
2185 txdctl |= ((uint32_t)txq->wthresh << IGC_TXDCTL_WTHRESH_SHIFT) &
2186 IGC_TXDCTL_WTHRESH_MSK;
2187 txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
2188 IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);
2191 igc_config_collision_dist(hw);
2193 /* Program the Transmit Control Register. */
2194 tctl = IGC_READ_REG(hw, IGC_TCTL);
2195 tctl &= ~IGC_TCTL_CT;
2196 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
2197 ((uint32_t)IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
2199 /* This write will effectively turn on the transmit unit. */
2200 IGC_WRITE_REG(hw, IGC_TCTL, tctl);
2204 eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2205 struct rte_eth_rxq_info *qinfo)
2207 struct igc_rx_queue *rxq;
2209 rxq = dev->data->rx_queues[queue_id];
2211 qinfo->mp = rxq->mb_pool;
2212 qinfo->scattered_rx = dev->data->scattered_rx;
2213 qinfo->nb_desc = rxq->nb_rx_desc;
2215 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2216 qinfo->conf.rx_drop_en = rxq->drop_en;
2217 qinfo->conf.offloads = rxq->offloads;
2218 qinfo->conf.rx_thresh.hthresh = rxq->hthresh;
2219 qinfo->conf.rx_thresh.pthresh = rxq->pthresh;
2220 qinfo->conf.rx_thresh.wthresh = rxq->wthresh;
2224 eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2225 struct rte_eth_txq_info *qinfo)
2227 struct igc_tx_queue *txq;
2229 txq = dev->data->tx_queues[queue_id];
2231 qinfo->nb_desc = txq->nb_tx_desc;
2233 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2234 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2235 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2236 qinfo->conf.offloads = txq->offloads;
2240 eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
2241 uint16_t rx_queue_id, int on)
2243 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2244 struct igc_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
2247 if (rx_queue_id >= IGC_QUEUE_PAIRS_NUM) {
2248 PMD_DRV_LOG(ERR, "Queue index (%u) is illegal, max is %u",
2249 rx_queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2253 reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
2255 reg_val |= IGC_DVMOLR_STRVLAN;
2256 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2258 reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN);
2259 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2262 IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);