1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2020 Intel Corporation
5 #include <rte_config.h>
6 #include <rte_malloc.h>
7 #include <rte_ethdev_driver.h>
13 #ifdef RTE_PMD_USE_PREFETCH
14 #define rte_igc_prefetch(p) rte_prefetch0(p)
16 #define rte_igc_prefetch(p) do {} while (0)
19 #ifdef RTE_PMD_PACKET_PREFETCH
20 #define rte_packet_prefetch(p) rte_prefetch1(p)
22 #define rte_packet_prefetch(p) do {} while (0)
25 /* Multicast / Unicast table offset mask. */
26 #define IGC_RCTL_MO_MSK (3u << IGC_RCTL_MO_SHIFT)
29 #define IGC_RCTL_LBM_SHIFT 6
30 #define IGC_RCTL_LBM_MSK (3u << IGC_RCTL_LBM_SHIFT)
32 /* Hash select for MTA */
33 #define IGC_RCTL_HSEL_SHIFT 8
34 #define IGC_RCTL_HSEL_MSK (3u << IGC_RCTL_HSEL_SHIFT)
35 #define IGC_RCTL_PSP (1u << 21)
37 /* Receive buffer size for header buffer */
38 #define IGC_SRRCTL_BSIZEHEADER_SHIFT 8
40 /* RX descriptor status and error flags */
41 #define IGC_RXD_STAT_L4CS (1u << 5)
42 #define IGC_RXD_STAT_VEXT (1u << 9)
43 #define IGC_RXD_STAT_LLINT (1u << 11)
44 #define IGC_RXD_STAT_SCRC (1u << 12)
45 #define IGC_RXD_STAT_SMDT_MASK (3u << 13)
46 #define IGC_RXD_STAT_MC (1u << 19)
47 #define IGC_RXD_EXT_ERR_L4E (1u << 29)
48 #define IGC_RXD_EXT_ERR_IPE (1u << 30)
49 #define IGC_RXD_EXT_ERR_RXE (1u << 31)
50 #define IGC_RXD_RSS_TYPE_MASK 0xfu
51 #define IGC_RXD_PCTYPE_MASK (0x7fu << 4)
52 #define IGC_RXD_ETQF_SHIFT 12
53 #define IGC_RXD_ETQF_MSK (0xfu << IGC_RXD_ETQF_SHIFT)
54 #define IGC_RXD_VPKT (1u << 16)
56 /* TXD control bits */
57 #define IGC_TXDCTL_PTHRESH_SHIFT 0
58 #define IGC_TXDCTL_HTHRESH_SHIFT 8
59 #define IGC_TXDCTL_WTHRESH_SHIFT 16
60 #define IGC_TXDCTL_PTHRESH_MSK (0x1fu << IGC_TXDCTL_PTHRESH_SHIFT)
61 #define IGC_TXDCTL_HTHRESH_MSK (0x1fu << IGC_TXDCTL_HTHRESH_SHIFT)
62 #define IGC_TXDCTL_WTHRESH_MSK (0x1fu << IGC_TXDCTL_WTHRESH_SHIFT)
64 /* RXD control bits */
65 #define IGC_RXDCTL_PTHRESH_SHIFT 0
66 #define IGC_RXDCTL_HTHRESH_SHIFT 8
67 #define IGC_RXDCTL_WTHRESH_SHIFT 16
68 #define IGC_RXDCTL_PTHRESH_MSK (0x1fu << IGC_RXDCTL_PTHRESH_SHIFT)
69 #define IGC_RXDCTL_HTHRESH_MSK (0x1fu << IGC_RXDCTL_HTHRESH_SHIFT)
70 #define IGC_RXDCTL_WTHRESH_MSK (0x1fu << IGC_RXDCTL_WTHRESH_SHIFT)
72 #define IGC_TSO_MAX_HDRLEN 512
73 #define IGC_TSO_MAX_MSS 9216
75 /* Bit mask indicating which bits are required to build the TX context */
76 #define IGC_TX_OFFLOAD_MASK ( \
86 #define IGC_TX_OFFLOAD_SEG (PKT_TX_TCP_SEG | PKT_TX_UDP_SEG)
88 #define IGC_ADVTXD_POPTS_TXSM 0x00000200 /* L4 Checksum offload request */
89 #define IGC_ADVTXD_POPTS_IXSM 0x00000100 /* IP Checksum offload request */
91 /* L4 packet type: reserved */
92 #define IGC_ADVTXD_TUCMD_L4T_RSV 0x00001800
94 #define IGC_TX_OFFLOAD_NOTSUP_MASK (PKT_TX_OFFLOAD_MASK ^ IGC_TX_OFFLOAD_MASK)
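/*
 * A note on the mask above: since IGC_TX_OFFLOAD_MASK is a subset of
 * PKT_TX_OFFLOAD_MASK, the XOR leaves exactly the offload bits this PMD
 * cannot handle, and any packet whose ol_flags intersect
 * IGC_TX_OFFLOAD_NOTSUP_MASK is rejected with ENOTSUP in
 * eth_igc_prep_pkts().
 */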
97 * Structure associated with each descriptor of the RX ring of an RX queue.
100 struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
104 * Structure associated with each RX queue.
106 struct igc_rx_queue {
107 struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
108 volatile union igc_adv_rx_desc *rx_ring;
109 /**< RX ring virtual address. */
110 uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
111 volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
112 volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
113 struct igc_rx_entry *sw_ring; /**< address of RX software ring. */
114 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
115 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
116 uint16_t nb_rx_desc; /**< number of RX descriptors. */
117 uint16_t rx_tail; /**< current value of RDT register. */
118 uint16_t nb_rx_hold; /**< number of held free RX desc. */
119 uint16_t rx_free_thresh; /**< max free RX desc to hold. */
120 uint16_t queue_id; /**< RX queue index. */
121 uint16_t reg_idx; /**< RX queue register index. */
122 uint16_t port_id; /**< Device port identifier. */
123 uint8_t pthresh; /**< Prefetch threshold register. */
124 uint8_t hthresh; /**< Host threshold register. */
125 uint8_t wthresh; /**< Write-back threshold register. */
126 uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
127 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
128 uint32_t flags; /**< RX flags. */
129 uint64_t offloads; /**< offloads of DEV_RX_OFFLOAD_* */
132 /** Offload features */
133 union igc_tx_offload {
136 uint64_t l3_len:9; /**< L3 (IP) Header Length. */
137 uint64_t l2_len:7; /**< L2 (MAC) Header Length. */
138 uint64_t vlan_tci:16;
139 /**< VLAN Tag Control Identifier (CPU order). */
140 uint64_t l4_len:8; /**< L4 (TCP/UDP) Header Length. */
141 uint64_t tso_segsz:16; /**< TCP TSO segment size. */
142 /* uint64_t unused:8; */
147 * Compare mask for igc_tx_offload.data,
148 * should be in sync with igc_tx_offload layout.
150 #define TX_MACIP_LEN_CMP_MASK 0x000000000000FFFFULL /**< L2L3 header mask. */
151 #define TX_VLAN_CMP_MASK 0x00000000FFFF0000ULL /**< Vlan mask. */
152 #define TX_TCP_LEN_CMP_MASK 0x000000FF00000000ULL /**< TCP header mask. */
153 #define TX_TSO_MSS_CMP_MASK 0x00FFFF0000000000ULL /**< TSO segsz mask. */
154 /** MAC + IP + TCP + MSS mask. */
155 #define TX_TSO_CMP_MASK \
156 (TX_MACIP_LEN_CMP_MASK | TX_TCP_LEN_CMP_MASK | TX_TSO_MSS_CMP_MASK)
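/*
 * For reference, these masks mirror the bit-field layout of union
 * igc_tx_offload above: l3_len(9) + l2_len(7) occupy bits 0-15
 * (TX_MACIP_LEN_CMP_MASK), vlan_tci bits 16-31, l4_len bits 32-39 and
 * tso_segsz bits 40-55, which is why TX_TSO_CMP_MASK is simply the union
 * of the MAC/IP, TCP-length and MSS masks.
 */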
159 * Structure to check whether a new context descriptor needs to be built
161 struct igc_advctx_info {
162 uint64_t flags; /**< ol_flags related to context build. */
163 /** tx offload: vlan, tso, l2-l3-l4 lengths. */
164 union igc_tx_offload tx_offload;
165 /** compare mask for tx offload. */
166 union igc_tx_offload tx_offload_mask;
170 * Hardware context number
173 IGC_CTX_0 = 0, /**< CTX0 */
174 IGC_CTX_1 = 1, /**< CTX1 */
175 IGC_CTX_NUM = 2, /**< CTX_NUM */
179 * Structure associated with each descriptor of the TX ring of a TX queue.
181 struct igc_tx_entry {
182 struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
183 uint16_t next_id; /**< Index of next descriptor in ring. */
184 uint16_t last_id; /**< Index of last scattered descriptor. */
188 * Structure associated with each TX queue.
190 struct igc_tx_queue {
191 volatile union igc_adv_tx_desc *tx_ring; /**< TX ring address */
192 uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
193 struct igc_tx_entry *sw_ring; /**< virtual address of SW ring. */
194 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
195 uint32_t txd_type; /**< Device-specific TXD type */
196 uint16_t nb_tx_desc; /**< number of TX descriptors. */
197 uint16_t tx_tail; /**< Current value of TDT register. */
199 /**< Index of first used TX descriptor. */
200 uint16_t queue_id; /**< TX queue index. */
201 uint16_t reg_idx; /**< TX queue register index. */
202 uint16_t port_id; /**< Device port identifier. */
203 uint8_t pthresh; /**< Prefetch threshold register. */
204 uint8_t hthresh; /**< Host threshold register. */
205 uint8_t wthresh; /**< Write-back threshold register. */
208 /**< Start context position for transmit queue. */
209 struct igc_advctx_info ctx_cache[IGC_CTX_NUM];
210 /**< Hardware context history.*/
211 uint64_t offloads; /**< offloads of DEV_TX_OFFLOAD_* */
214 static inline uint64_t
215 rx_desc_statuserr_to_pkt_flags(uint32_t statuserr)
217 static uint64_t l4_chksum_flags[] = {0, 0, PKT_RX_L4_CKSUM_GOOD,
218 PKT_RX_L4_CKSUM_BAD};
220 static uint64_t l3_chksum_flags[] = {0, 0, PKT_RX_IP_CKSUM_GOOD,
221 PKT_RX_IP_CKSUM_BAD};
222 uint64_t pkt_flags = 0;
225 if (statuserr & IGC_RXD_STAT_VP)
226 pkt_flags |= PKT_RX_VLAN_STRIPPED;
228 tmp = !!(statuserr & (IGC_RXD_STAT_L4CS | IGC_RXD_STAT_UDPCS));
229 tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_L4E);
230 pkt_flags |= l4_chksum_flags[tmp];
232 tmp = !!(statuserr & IGC_RXD_STAT_IPCS);
233 tmp = (tmp << 1) | (uint32_t)!!(statuserr & IGC_RXD_EXT_ERR_IPE);
234 pkt_flags |= l3_chksum_flags[tmp];
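/*
 * The lookup tables above are indexed with a two-bit value: bit 1 is set
 * when the hardware reports that it actually checksummed the header, bit 0
 * when it reports a checksum error. Index 2 therefore yields CKSUM_GOOD,
 * index 3 CKSUM_BAD, and indexes 0/1 (not checksummed) set no flag.
 */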
239 #define IGC_PACKET_TYPE_IPV4 0X01
240 #define IGC_PACKET_TYPE_IPV4_TCP 0X11
241 #define IGC_PACKET_TYPE_IPV4_UDP 0X21
242 #define IGC_PACKET_TYPE_IPV4_SCTP 0X41
243 #define IGC_PACKET_TYPE_IPV4_EXT 0X03
244 #define IGC_PACKET_TYPE_IPV4_EXT_SCTP 0X43
245 #define IGC_PACKET_TYPE_IPV6 0X04
246 #define IGC_PACKET_TYPE_IPV6_TCP 0X14
247 #define IGC_PACKET_TYPE_IPV6_UDP 0X24
248 #define IGC_PACKET_TYPE_IPV6_EXT 0X0C
249 #define IGC_PACKET_TYPE_IPV6_EXT_TCP 0X1C
250 #define IGC_PACKET_TYPE_IPV6_EXT_UDP 0X2C
251 #define IGC_PACKET_TYPE_IPV4_IPV6 0X05
252 #define IGC_PACKET_TYPE_IPV4_IPV6_TCP 0X15
253 #define IGC_PACKET_TYPE_IPV4_IPV6_UDP 0X25
254 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT 0X0D
255 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D
256 #define IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D
257 #define IGC_PACKET_TYPE_MAX 0X80
258 #define IGC_PACKET_TYPE_MASK 0X7F
259 #define IGC_PACKET_TYPE_SHIFT 0X04
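/*
 * The advanced RX descriptor encodes the packet type in bits 4-10 of the
 * pkt_info field, hence the shift by 4 and the 0x7F mask used below. The
 * 128-entry table only initializes the types this driver recognizes; all
 * other indexes stay zero, i.e. RTE_PTYPE_UNKNOWN.
 */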
261 static inline uint32_t
262 rx_desc_pkt_info_to_pkt_type(uint32_t pkt_info)
264 static const uint32_t
265 ptype_table[IGC_PACKET_TYPE_MAX] __rte_cache_aligned = {
266 [IGC_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER |
268 [IGC_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER |
269 RTE_PTYPE_L3_IPV4_EXT,
270 [IGC_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER |
272 [IGC_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER |
273 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
274 RTE_PTYPE_INNER_L3_IPV6,
275 [IGC_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
276 RTE_PTYPE_L3_IPV6_EXT,
277 [IGC_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER |
278 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
279 RTE_PTYPE_INNER_L3_IPV6_EXT,
280 [IGC_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER |
281 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
282 [IGC_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
283 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
284 [IGC_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER |
285 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
286 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP,
287 [IGC_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
288 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
289 [IGC_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER |
290 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
291 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP,
292 [IGC_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER |
293 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
294 [IGC_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
295 RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
296 [IGC_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER |
297 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
298 RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP,
299 [IGC_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
300 RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
301 [IGC_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER |
302 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP |
303 RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP,
304 [IGC_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER |
305 RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP,
306 [IGC_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER |
307 RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP,
309 if (unlikely(pkt_info & IGC_RXDADV_PKTTYPE_ETQF))
310 return RTE_PTYPE_UNKNOWN;
312 pkt_info = (pkt_info >> IGC_PACKET_TYPE_SHIFT) & IGC_PACKET_TYPE_MASK;
314 return ptype_table[pkt_info];
318 rx_desc_get_pkt_info(struct igc_rx_queue *rxq, struct rte_mbuf *rxm,
319 union igc_adv_rx_desc *rxd, uint32_t staterr)
322 uint32_t hlen_type_rss;
325 /* Prefetch data of first segment, if configured to do so. */
326 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
328 rxm->port = rxq->port_id;
329 hlen_type_rss = rte_le_to_cpu_32(rxd->wb.lower.lo_dword.data);
330 rxm->hash.rss = rte_le_to_cpu_32(rxd->wb.lower.hi_dword.rss);
331 rxm->vlan_tci = rte_le_to_cpu_16(rxd->wb.upper.vlan);
333 pkt_flags = (hlen_type_rss & IGC_RXD_RSS_TYPE_MASK) ?
336 if (hlen_type_rss & IGC_RXD_VPKT)
337 pkt_flags |= PKT_RX_VLAN;
339 pkt_flags |= rx_desc_statuserr_to_pkt_flags(staterr);
341 rxm->ol_flags = pkt_flags;
342 pkt_info = rte_le_to_cpu_16(rxd->wb.lower.lo_dword.hs_rss.pkt_info);
343 rxm->packet_type = rx_desc_pkt_info_to_pkt_type(pkt_info);
347 igc_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
349 struct igc_rx_queue * const rxq = rx_queue;
350 volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;
351 struct igc_rx_entry * const sw_ring = rxq->sw_ring;
352 uint16_t rx_id = rxq->rx_tail;
354 uint16_t nb_hold = 0;
356 while (nb_rx < nb_pkts) {
357 volatile union igc_adv_rx_desc *rxdp;
358 struct igc_rx_entry *rxe;
359 struct rte_mbuf *rxm;
360 struct rte_mbuf *nmb;
361 union igc_adv_rx_desc rxd;
366 * The order of operations here is important as the DD status
367 * bit must not be read after any other descriptor fields.
368 * rx_ring and rxdp are pointing to volatile data so the order
369 * of accesses cannot be reordered by the compiler. If they were
370 * not volatile, they could be reordered which could lead to
371 * using invalid descriptor fields when read from rxd.
373 rxdp = &rx_ring[rx_id];
374 staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);
375 if (!(staterr & IGC_RXD_STAT_DD))
382 * If the IGC_RXD_STAT_EOP flag is not set, the RX packet is
383 * likely to be invalid and to be dropped by the various
384 * validation checks performed by the network stack.
386 * Allocate a new mbuf to replenish the RX ring descriptor.
387 * If the allocation fails:
388 * - arrange for that RX descriptor to be the first one
389 * being parsed the next time the receive function is
390 * invoked [on the same queue].
392 * - Stop parsing the RX ring and return immediately.
394 * This policy does not drop the packet received in the RX
395 * descriptor for which the allocation of a new mbuf failed.
396 * Thus, it allows that packet to be later retrieved if
397 * mbufs have been freed in the meantime.
398 * As a side effect, holding RX descriptors instead of
399 * systematically giving them back to the NIC may lead to
400 * RX ring exhaustion situations.
401 * However, the NIC can gracefully prevent such situations
402 * from happening by sending specific "back-pressure" flow control
403 * frames to its peer(s).
406 "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u",
407 rxq->port_id, rxq->queue_id, rx_id, staterr,
408 rte_le_to_cpu_16(rxd.wb.upper.length));
410 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
414 "RX mbuf alloc failed, port_id=%u queue_id=%u",
415 rxq->port_id, rxq->queue_id);
417 rte_eth_devices[id].data->rx_mbuf_alloc_failed++;
422 rxe = &sw_ring[rx_id];
424 if (rx_id >= rxq->nb_rx_desc)
427 /* Prefetch next mbuf while processing current one. */
428 rte_igc_prefetch(sw_ring[rx_id].mbuf);
431 * When next RX descriptor is on a cache-line boundary,
432 * prefetch the next 4 RX descriptors and the next 8 pointers
435 if ((rx_id & 0x3) == 0) {
436 rte_igc_prefetch(&rx_ring[rx_id]);
437 rte_igc_prefetch(&sw_ring[rx_id]);
441 * Update RX descriptor with the physical address of the new
442 * data buffer of the new allocated mbuf.
446 rxdp->read.hdr_addr = 0;
447 rxdp->read.pkt_addr =
448 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
451 rxm->data_off = RTE_PKTMBUF_HEADROOM;
452 data_len = rte_le_to_cpu_16(rxd.wb.upper.length) - rxq->crc_len;
453 rxm->data_len = data_len;
454 rxm->pkt_len = data_len;
457 rx_desc_get_pkt_info(rxq, rxm, &rxd, staterr);
460 * Store the mbuf address into the next entry of the array
461 * of returned packets.
463 rx_pkts[nb_rx++] = rxm;
465 rxq->rx_tail = rx_id;
468 * If the number of free RX descriptors is greater than the RX free
469 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
471 * Update the RDT with the value of the last processed RX descriptor
472 * minus 1, to guarantee that the RDT register is never equal to the
473 * RDH register, which creates a "full" ring situation from the
474 * hardware point of view...
476 nb_hold = nb_hold + rxq->nb_rx_hold;
477 if (nb_hold > rxq->rx_free_thresh) {
479 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
480 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
481 rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);
482 IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
485 rxq->nb_rx_hold = nb_hold;
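/*
 * For example, with rx_free_thresh = 32 the RDT register is written only
 * once at least 33 descriptors have been refilled, which batches the MMIO
 * tail updates instead of touching the register for every packet.
 */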
490 igc_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
493 struct igc_rx_queue * const rxq = rx_queue;
494 volatile union igc_adv_rx_desc * const rx_ring = rxq->rx_ring;
495 struct igc_rx_entry * const sw_ring = rxq->sw_ring;
496 struct rte_mbuf *first_seg = rxq->pkt_first_seg;
497 struct rte_mbuf *last_seg = rxq->pkt_last_seg;
499 uint16_t rx_id = rxq->rx_tail;
501 uint16_t nb_hold = 0;
503 while (nb_rx < nb_pkts) {
504 volatile union igc_adv_rx_desc *rxdp;
505 struct igc_rx_entry *rxe;
506 struct rte_mbuf *rxm;
507 struct rte_mbuf *nmb;
508 union igc_adv_rx_desc rxd;
514 * The order of operations here is important as the DD status
515 * bit must not be read after any other descriptor fields.
516 * rx_ring and rxdp are pointing to volatile data so the order
517 * of accesses cannot be reordered by the compiler. If they were
518 * not volatile, they could be reordered which could lead to
519 * using invalid descriptor fields when read from rxd.
521 rxdp = &rx_ring[rx_id];
522 staterr = rte_cpu_to_le_32(rxdp->wb.upper.status_error);
523 if (!(staterr & IGC_RXD_STAT_DD))
530 * Allocate a new mbuf to replenish the RX ring descriptor.
531 * If the allocation fails:
532 * - arrange for that RX descriptor to be the first one
533 * being parsed the next time the receive function is
534 * invoked [on the same queue].
536 * - Stop parsing the RX ring and return immediately.
538 * This policy does not drop the packet received in the RX
539 * descriptor for which the allocation of a new mbuf failed.
540 * Thus, it allows that packet to be later retrieved if
541 * mbufs have been freed in the meantime.
542 * As a side effect, holding RX descriptors instead of
543 * systematically giving them back to the NIC may lead to
544 * RX ring exhaustion situations.
545 * However, the NIC can gracefully prevent such situations
546 * from happening by sending specific "back-pressure" flow control
547 * frames to its peer(s).
550 "port_id=%u queue_id=%u rx_id=%u staterr=0x%x data_len=%u",
551 rxq->port_id, rxq->queue_id, rx_id, staterr,
552 rte_le_to_cpu_16(rxd.wb.upper.length));
554 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
558 "RX mbuf alloc failed, port_id=%u queue_id=%u",
559 rxq->port_id, rxq->queue_id);
561 rte_eth_devices[id].data->rx_mbuf_alloc_failed++;
566 rxe = &sw_ring[rx_id];
568 if (rx_id >= rxq->nb_rx_desc)
571 /* Prefetch next mbuf while processing current one. */
572 rte_igc_prefetch(sw_ring[rx_id].mbuf);
575 * When next RX descriptor is on a cache-line boundary,
576 * prefetch the next 4 RX descriptors and the next 8 pointers
579 if ((rx_id & 0x3) == 0) {
580 rte_igc_prefetch(&rx_ring[rx_id]);
581 rte_igc_prefetch(&sw_ring[rx_id]);
585 * Update RX descriptor with the physical address of the new
586 * data buffer of the new allocated mbuf.
590 rxdp->read.hdr_addr = 0;
591 rxdp->read.pkt_addr =
592 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
596 * Set data length & data buffer address of mbuf.
598 rxm->data_off = RTE_PKTMBUF_HEADROOM;
599 data_len = rte_le_to_cpu_16(rxd.wb.upper.length);
600 rxm->data_len = data_len;
603 * If this is the first buffer of the received packet,
604 * set the pointer to the first mbuf of the packet and
605 * initialize its context.
606 * Otherwise, update the total length and the number of segments
607 * of the current scattered packet, and update the pointer to
608 * the last mbuf of the current packet.
610 if (first_seg == NULL) {
612 first_seg->pkt_len = data_len;
613 first_seg->nb_segs = 1;
615 first_seg->pkt_len += data_len;
616 first_seg->nb_segs++;
617 last_seg->next = rxm;
621 * If this is not the last buffer of the received packet,
622 * update the pointer to the last mbuf of the current scattered
623 * packet and continue to parse the RX ring.
625 if (!(staterr & IGC_RXD_STAT_EOP)) {
631 * This is the last buffer of the received packet.
632 * If the CRC is not stripped by the hardware:
633 * - Subtract the CRC length from the total packet length.
634 * - If the last buffer only contains the whole CRC or a part
635 * of it, free the mbuf associated to the last buffer.
636 * If part of the CRC is also contained in the previous
637 * mbuf, subtract the length of that CRC part from the
638 * data length of the previous mbuf.
640 if (unlikely(rxq->crc_len > 0)) {
641 first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
642 if (data_len <= RTE_ETHER_CRC_LEN) {
643 rte_pktmbuf_free_seg(rxm);
644 first_seg->nb_segs--;
645 last_seg->data_len = last_seg->data_len -
646 (RTE_ETHER_CRC_LEN - data_len);
647 last_seg->next = NULL;
649 rxm->data_len = (uint16_t)
650 (data_len - RTE_ETHER_CRC_LEN);
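/*
 * Worked example with the 4-byte Ethernet CRC: if the final buffer holds
 * only data_len = 2 bytes, it contains nothing but CRC, so it is freed and
 * the previous segment is trimmed by the remaining 4 - 2 = 2 CRC bytes;
 * otherwise the CRC is simply cut off the end of the last segment.
 */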
654 rx_desc_get_pkt_info(rxq, first_seg, &rxd, staterr);
657 * Store the mbuf address into the next entry of the array
658 * of returned packets.
660 rx_pkts[nb_rx++] = first_seg;
662 /* Set up the receive context for a new packet. */
665 rxq->rx_tail = rx_id;
668 * Save receive context.
670 rxq->pkt_first_seg = first_seg;
671 rxq->pkt_last_seg = last_seg;
674 * If the number of free RX descriptors is greater than the RX free
675 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
677 * Update the RDT with the value of the last processed RX descriptor
678 * minus 1, to guarantee that the RDT register is never equal to the
679 * RDH register, which creates a "full" ring situation from the
680 * hardware point of view...
682 nb_hold = nb_hold + rxq->nb_rx_hold;
683 if (nb_hold > rxq->rx_free_thresh) {
685 "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u nb_rx=%u",
686 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
687 rx_id = (rx_id == 0) ? (rxq->nb_rx_desc - 1) : (rx_id - 1);
688 IGC_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id);
691 rxq->nb_rx_hold = nb_hold;
696 igc_rx_queue_release_mbufs(struct igc_rx_queue *rxq)
700 if (rxq->sw_ring != NULL) {
701 for (i = 0; i < rxq->nb_rx_desc; i++) {
702 if (rxq->sw_ring[i].mbuf != NULL) {
703 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
704 rxq->sw_ring[i].mbuf = NULL;
711 igc_rx_queue_release(struct igc_rx_queue *rxq)
713 igc_rx_queue_release_mbufs(rxq);
714 rte_free(rxq->sw_ring);
718 void eth_igc_rx_queue_release(void *rxq)
721 igc_rx_queue_release(rxq);
724 uint32_t eth_igc_rx_queue_count(struct rte_eth_dev *dev,
725 uint16_t rx_queue_id)
728 * Check the DD bit of one RX descriptor in every group of 4,
729 * to avoid checking too frequently, which would degrade performance
732 #define IGC_RXQ_SCAN_INTERVAL 4
734 volatile union igc_adv_rx_desc *rxdp;
735 struct igc_rx_queue *rxq;
738 rxq = dev->data->rx_queues[rx_queue_id];
739 rxdp = &rxq->rx_ring[rxq->rx_tail];
741 while (desc < rxq->nb_rx_desc - rxq->rx_tail) {
742 if (unlikely(!(rxdp->wb.upper.status_error &
745 desc += IGC_RXQ_SCAN_INTERVAL;
746 rxdp += IGC_RXQ_SCAN_INTERVAL;
748 rxdp = &rxq->rx_ring[rxq->rx_tail + desc - rxq->nb_rx_desc];
750 while (desc < rxq->nb_rx_desc &&
751 (rxdp->wb.upper.status_error & IGC_RXD_STAT_DD)) {
752 desc += IGC_RXQ_SCAN_INTERVAL;
753 rxdp += IGC_RXQ_SCAN_INTERVAL;
759 int eth_igc_rx_descriptor_done(void *rx_queue, uint16_t offset)
761 volatile union igc_adv_rx_desc *rxdp;
762 struct igc_rx_queue *rxq = rx_queue;
765 if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
768 desc = rxq->rx_tail + offset;
769 if (desc >= rxq->nb_rx_desc)
770 desc -= rxq->nb_rx_desc;
772 rxdp = &rxq->rx_ring[desc];
773 return !!(rxdp->wb.upper.status_error &
774 rte_cpu_to_le_32(IGC_RXD_STAT_DD));
777 int eth_igc_rx_descriptor_status(void *rx_queue, uint16_t offset)
779 struct igc_rx_queue *rxq = rx_queue;
780 volatile uint32_t *status;
783 if (unlikely(!rxq || offset >= rxq->nb_rx_desc))
786 if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
787 return RTE_ETH_RX_DESC_UNAVAIL;
789 desc = rxq->rx_tail + offset;
790 if (desc >= rxq->nb_rx_desc)
791 desc -= rxq->nb_rx_desc;
793 status = &rxq->rx_ring[desc].wb.upper.status_error;
794 if (*status & rte_cpu_to_le_32(IGC_RXD_STAT_DD))
795 return RTE_ETH_RX_DESC_DONE;
797 return RTE_ETH_RX_DESC_AVAIL;
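/*
 * Mapping summary: DONE means the DD bit has been written back by the
 * hardware, AVAIL means the descriptor was handed to the NIC but not yet
 * completed, and UNAVAIL covers descriptors still held by the driver
 * (nb_rx_hold) and not yet replenished.
 */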
801 igc_alloc_rx_queue_mbufs(struct igc_rx_queue *rxq)
803 struct igc_rx_entry *rxe = rxq->sw_ring;
807 /* Initialize software ring entries. */
808 for (i = 0; i < rxq->nb_rx_desc; i++) {
809 volatile union igc_adv_rx_desc *rxd;
810 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
813 PMD_DRV_LOG(ERR, "RX mbuf alloc failed, queue_id=%hu",
817 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
818 rxd = &rxq->rx_ring[i];
819 rxd->read.hdr_addr = 0;
820 rxd->read.pkt_addr = dma_addr;
828 * RSS random key supplied in section 7.1.2.9.3 of the Intel I225 datasheet.
829 * Used as the default key.
831 static uint8_t default_rss_key[40] = {
832 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
833 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
834 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
835 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
836 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
840 igc_rss_disable(struct rte_eth_dev *dev)
842 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
845 mrqc = IGC_READ_REG(hw, IGC_MRQC);
846 mrqc &= ~IGC_MRQC_ENABLE_MASK;
847 IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
851 igc_hw_rss_hash_set(struct igc_hw *hw, struct rte_eth_rss_conf *rss_conf)
853 uint32_t *hash_key = (uint32_t *)rss_conf->rss_key;
857 if (hash_key != NULL) {
860 /* Fill in RSS hash key */
861 for (i = 0; i < IGC_HKEY_MAX_INDEX; i++)
862 IGC_WRITE_REG_LE_VALUE(hw, IGC_RSSRK(i), hash_key[i]);
865 /* Set configured hashing protocols in MRQC register */
866 rss_hf = rss_conf->rss_hf;
867 mrqc = IGC_MRQC_ENABLE_RSS_4Q; /* RSS enabled. */
868 if (rss_hf & ETH_RSS_IPV4)
869 mrqc |= IGC_MRQC_RSS_FIELD_IPV4;
870 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
871 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_TCP;
872 if (rss_hf & ETH_RSS_IPV6)
873 mrqc |= IGC_MRQC_RSS_FIELD_IPV6;
874 if (rss_hf & ETH_RSS_IPV6_EX)
875 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_EX;
876 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
877 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP;
878 if (rss_hf & ETH_RSS_IPV6_TCP_EX)
879 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_TCP_EX;
880 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
881 mrqc |= IGC_MRQC_RSS_FIELD_IPV4_UDP;
882 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
883 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP;
884 if (rss_hf & ETH_RSS_IPV6_UDP_EX)
885 mrqc |= IGC_MRQC_RSS_FIELD_IPV6_UDP_EX;
886 IGC_WRITE_REG(hw, IGC_MRQC, mrqc);
890 igc_rss_configure(struct rte_eth_dev *dev)
892 struct rte_eth_rss_conf rss_conf;
893 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
896 /* Fill in redirection table. */
897 for (i = 0; i < IGC_RSS_RDT_SIZD; i++) {
898 union igc_rss_reta_reg reta;
899 uint16_t q_idx, reta_idx;
901 q_idx = (uint8_t)((dev->data->nb_rx_queues > 1) ?
902 i % dev->data->nb_rx_queues : 0);
903 reta_idx = i % sizeof(reta);
904 reta.bytes[reta_idx] = q_idx;
905 if (reta_idx == sizeof(reta) - 1)
906 IGC_WRITE_REG_LE_VALUE(hw,
907 IGC_RETA(i / sizeof(reta)), reta.dword);
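/*
 * Each RETA register packs sizeof(reta) (four) one-byte queue indices, so
 * the loop fills the redirection table round-robin: e.g. with two RX
 * queues the table becomes 0,1,0,1,... and the RSS hash space is spread
 * evenly across the queues.
 */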
911 * Configure the RSS key and the RSS protocols used to compute
912 * the RSS hash of input packets.
914 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
915 if (rss_conf.rss_key == NULL)
916 rss_conf.rss_key = default_rss_key;
917 igc_hw_rss_hash_set(hw, &rss_conf);
921 igc_dev_mq_rx_configure(struct rte_eth_dev *dev)
923 if (RTE_ETH_DEV_SRIOV(dev).active) {
924 PMD_DRV_LOG(ERR, "SRIOV unsupported!");
928 switch (dev->data->dev_conf.rxmode.mq_mode) {
930 igc_rss_configure(dev);
934 * Configure the RSS registers as in RSS mode,
935 * then disable the RSS logic
937 igc_rss_configure(dev);
938 igc_rss_disable(dev);
941 PMD_DRV_LOG(ERR, "rx mode(%d) not supported!",
942 dev->data->dev_conf.rxmode.mq_mode);
949 igc_rx_init(struct rte_eth_dev *dev)
951 struct igc_rx_queue *rxq;
952 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
953 uint64_t offloads = dev->data->dev_conf.rxmode.offloads;
954 uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
962 dev->rx_pkt_burst = igc_recv_pkts;
965 * Make sure receives are disabled while setting
966 * up the descriptor ring.
968 rctl = IGC_READ_REG(hw, IGC_RCTL);
969 IGC_WRITE_REG(hw, IGC_RCTL, rctl & ~IGC_RCTL_EN);
971 /* Configure support of jumbo frames, if any. */
972 if (offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
973 rctl |= IGC_RCTL_LPE;
976 * Set the maximum packet length by default; it may be updated
977 * later when dual VLAN is enabled or disabled.
979 IGC_WRITE_REG(hw, IGC_RLPML, max_rx_pkt_len);
981 rctl &= ~IGC_RCTL_LPE;
984 /* Configure and enable each RX queue. */
986 for (i = 0; i < dev->data->nb_rx_queues; i++) {
991 rxq = dev->data->rx_queues[i];
994 /* Allocate buffers for descriptor rings and set up queue */
995 ret = igc_alloc_rx_queue_mbufs(rxq);
1000 * Reset crc_len in case it was changed after queue setup by a
1003 rxq->crc_len = (offloads & DEV_RX_OFFLOAD_KEEP_CRC) ?
1004 RTE_ETHER_CRC_LEN : 0;
1006 bus_addr = rxq->rx_ring_phys_addr;
1007 IGC_WRITE_REG(hw, IGC_RDLEN(rxq->reg_idx),
1009 sizeof(union igc_adv_rx_desc));
1010 IGC_WRITE_REG(hw, IGC_RDBAH(rxq->reg_idx),
1011 (uint32_t)(bus_addr >> 32));
1012 IGC_WRITE_REG(hw, IGC_RDBAL(rxq->reg_idx),
1013 (uint32_t)bus_addr);
1015 /* set descriptor configuration */
1016 srrctl = IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
1018 srrctl |= (uint32_t)(RTE_PKTMBUF_HEADROOM / 64) <<
1019 IGC_SRRCTL_BSIZEHEADER_SHIFT;
1021 * Configure RX buffer size.
1023 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1024 RTE_PKTMBUF_HEADROOM);
1025 if (buf_size >= 1024) {
1027 * Configure the BSIZEPACKET field of the SRRCTL
1028 * register of the queue.
1029 * Value is in 1 KB resolution, from 1 KB to 16 KB.
1030 * If this field is equal to 0b, then RCTL.BSIZE
1031 * determines the RX packet buffer size.
1034 srrctl |= ((buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT) &
1035 IGC_SRRCTL_BSIZEPKT_MASK);
1036 buf_size = (uint16_t)((srrctl &
1037 IGC_SRRCTL_BSIZEPKT_MASK) <<
1038 IGC_SRRCTL_BSIZEPKT_SHIFT);
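/*
 * Rough example, assuming the usual 1 KB BSIZEPKT granularity and the
 * default 128-byte RTE_PKTMBUF_HEADROOM: a mempool with 2048-byte data
 * rooms gives buf_size = 1920, which is rounded down to an effective
 * 1 KB buffer here; if max_rx_pkt_len plus the dual VLAN allowance
 * exceeds that, scattered RX is enabled below.
 */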
1040 /* Add dual VLAN tag length to account for dual VLAN (QinQ) support */
1041 if (max_rx_pkt_len + 2 * VLAN_TAG_SIZE > buf_size)
1042 dev->data->scattered_rx = 1;
1045 * Use BSIZE field of the device RCTL register.
1047 if (rctl_bsize == 0 || rctl_bsize > buf_size)
1048 rctl_bsize = buf_size;
1049 dev->data->scattered_rx = 1;
1052 /* Set whether packets are dropped when no descriptors are available */
1054 srrctl |= IGC_SRRCTL_DROP_EN;
1056 IGC_WRITE_REG(hw, IGC_SRRCTL(rxq->reg_idx), srrctl);
1058 /* Enable this RX queue. */
1059 rxdctl = IGC_RXDCTL_QUEUE_ENABLE;
1060 rxdctl |= ((uint32_t)rxq->pthresh << IGC_RXDCTL_PTHRESH_SHIFT) &
1061 IGC_RXDCTL_PTHRESH_MSK;
1062 rxdctl |= ((uint32_t)rxq->hthresh << IGC_RXDCTL_HTHRESH_SHIFT) &
1063 IGC_RXDCTL_HTHRESH_MSK;
1064 rxdctl |= ((uint32_t)rxq->wthresh << IGC_RXDCTL_WTHRESH_SHIFT) &
1065 IGC_RXDCTL_WTHRESH_MSK;
1066 IGC_WRITE_REG(hw, IGC_RXDCTL(rxq->reg_idx), rxdctl);
1069 if (offloads & DEV_RX_OFFLOAD_SCATTER)
1070 dev->data->scattered_rx = 1;
1072 if (dev->data->scattered_rx) {
1073 PMD_DRV_LOG(DEBUG, "forcing scatter mode");
1074 dev->rx_pkt_burst = igc_recv_scattered_pkts;
1077 * Setup BSIZE field of RCTL register, if needed.
1078 * Buffer sizes >= 1024 are not [supposed to be] set up in the RCTL
1079 * register, since the code above configures the SRRCTL register of
1080 * the RX queue in such a case.
1081 * All configurable sizes are:
1082 * 16384: rctl |= (IGC_RCTL_SZ_16384 | IGC_RCTL_BSEX);
1083 * 8192: rctl |= (IGC_RCTL_SZ_8192 | IGC_RCTL_BSEX);
1084 * 4096: rctl |= (IGC_RCTL_SZ_4096 | IGC_RCTL_BSEX);
1085 * 2048: rctl |= IGC_RCTL_SZ_2048;
1086 * 1024: rctl |= IGC_RCTL_SZ_1024;
1087 * 512: rctl |= IGC_RCTL_SZ_512;
1088 * 256: rctl |= IGC_RCTL_SZ_256;
1090 if (rctl_bsize > 0) {
1091 if (rctl_bsize >= 512) /* 512 <= buf_size < 1024 - use 512 */
1092 rctl |= IGC_RCTL_SZ_512;
1093 else /* 256 <= buf_size < 512 - use 256 */
1094 rctl |= IGC_RCTL_SZ_256;
1098 * Configure RSS if the device is configured with multiple RX queues.
1100 igc_dev_mq_rx_configure(dev);
1102 /* Update the rctl since igc_dev_mq_rx_configure may change its value */
1103 rctl |= IGC_READ_REG(hw, IGC_RCTL);
1106 * Setup the Checksum Register.
1107 * Receive Full-Packet Checksum Offload is mutually exclusive with RSS.
1109 rxcsum = IGC_READ_REG(hw, IGC_RXCSUM);
1110 rxcsum |= IGC_RXCSUM_PCSD;
1112 /* Enable both L3/L4 rx checksum offload */
1113 if (offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
1114 rxcsum |= IGC_RXCSUM_IPOFL;
1116 rxcsum &= ~IGC_RXCSUM_IPOFL;
1119 (DEV_RX_OFFLOAD_TCP_CKSUM | DEV_RX_OFFLOAD_UDP_CKSUM)) {
1120 rxcsum |= IGC_RXCSUM_TUOFL;
1121 offloads |= DEV_RX_OFFLOAD_SCTP_CKSUM;
1123 rxcsum &= ~IGC_RXCSUM_TUOFL;
1126 if (offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)
1127 rxcsum |= IGC_RXCSUM_CRCOFL;
1129 rxcsum &= ~IGC_RXCSUM_CRCOFL;
1131 IGC_WRITE_REG(hw, IGC_RXCSUM, rxcsum);
1133 /* Setup the Receive Control Register. */
1134 if (offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1135 rctl &= ~IGC_RCTL_SECRC; /* Do not Strip Ethernet CRC. */
1137 rctl |= IGC_RCTL_SECRC; /* Strip Ethernet CRC. */
1139 rctl &= ~IGC_RCTL_MO_MSK;
1140 rctl &= ~IGC_RCTL_LBM_MSK;
1141 rctl |= IGC_RCTL_EN | IGC_RCTL_BAM | IGC_RCTL_LBM_NO |
1143 (hw->mac.mc_filter_type << IGC_RCTL_MO_SHIFT);
1145 if (dev->data->dev_conf.lpbk_mode == 1)
1146 rctl |= IGC_RCTL_LBM_MAC;
1148 rctl &= ~(IGC_RCTL_HSEL_MSK | IGC_RCTL_CFIEN | IGC_RCTL_CFI |
1149 IGC_RCTL_PSP | IGC_RCTL_PMCF);
1151 /* Make sure VLAN Filters are off. */
1152 rctl &= ~IGC_RCTL_VFE;
1153 /* Don't store bad packets. */
1154 rctl &= ~IGC_RCTL_SBP;
1156 /* Enable Receives. */
1157 IGC_WRITE_REG(hw, IGC_RCTL, rctl);
1160 * Setup the HW Rx Head and Tail Descriptor Pointers.
1161 * This needs to be done after enable.
1163 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1164 rxq = dev->data->rx_queues[i];
1165 IGC_WRITE_REG(hw, IGC_RDH(rxq->reg_idx), 0);
1166 IGC_WRITE_REG(hw, IGC_RDT(rxq->reg_idx),
1167 rxq->nb_rx_desc - 1);
1169 /* Per-queue VLAN strip offload */
1170 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
1172 dvmolr = IGC_READ_REG(hw, IGC_DVMOLR(rxq->queue_id));
1174 /* If the VLAN has been stripped off, the CRC is meaningless. */
1175 dvmolr |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
1176 IGC_WRITE_REG(hw, IGC_DVMOLR(rxq->reg_idx), dvmolr);
1184 igc_reset_rx_queue(struct igc_rx_queue *rxq)
1186 static const union igc_adv_rx_desc zeroed_desc = { {0} };
1189 /* Zero out HW ring memory */
1190 for (i = 0; i < rxq->nb_rx_desc; i++)
1191 rxq->rx_ring[i] = zeroed_desc;
1194 rxq->pkt_first_seg = NULL;
1195 rxq->pkt_last_seg = NULL;
1199 eth_igc_rx_queue_setup(struct rte_eth_dev *dev,
1202 unsigned int socket_id,
1203 const struct rte_eth_rxconf *rx_conf,
1204 struct rte_mempool *mp)
1206 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
1207 const struct rte_memzone *rz;
1208 struct igc_rx_queue *rxq;
1212 * Validate number of receive descriptors.
1213 * It must not exceed hardware maximum, and must be multiple
1214 * of IGC_RX_DESCRIPTOR_MULTIPLE.
1216 if (nb_desc % IGC_RX_DESCRIPTOR_MULTIPLE != 0 ||
1217 nb_desc > IGC_MAX_RXD || nb_desc < IGC_MIN_RXD) {
1219 "RX descriptor count must be a multiple of %u (cur: %u) and between %u and %u",
1220 IGC_RX_DESCRIPTOR_MULTIPLE, nb_desc,
1221 IGC_MIN_RXD, IGC_MAX_RXD);
1225 /* Free memory prior to re-allocation if needed */
1226 if (dev->data->rx_queues[queue_idx] != NULL) {
1227 igc_rx_queue_release(dev->data->rx_queues[queue_idx]);
1228 dev->data->rx_queues[queue_idx] = NULL;
1231 /* First allocate the RX queue data structure. */
1232 rxq = rte_zmalloc("ethdev RX queue", sizeof(struct igc_rx_queue),
1233 RTE_CACHE_LINE_SIZE);
1236 rxq->offloads = rx_conf->offloads;
1238 rxq->nb_rx_desc = nb_desc;
1239 rxq->pthresh = rx_conf->rx_thresh.pthresh;
1240 rxq->hthresh = rx_conf->rx_thresh.hthresh;
1241 rxq->wthresh = rx_conf->rx_thresh.wthresh;
1242 rxq->drop_en = rx_conf->rx_drop_en;
1243 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1244 rxq->queue_id = queue_idx;
1245 rxq->reg_idx = queue_idx;
1246 rxq->port_id = dev->data->port_id;
1249 * Allocate RX ring hardware descriptors. A memzone large enough to
1250 * handle the maximum ring size is allocated in order to allow for
1251 * resizing in later calls to the queue setup function.
1253 size = sizeof(union igc_adv_rx_desc) * IGC_MAX_RXD;
1254 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size,
1255 IGC_ALIGN, socket_id);
1257 igc_rx_queue_release(rxq);
1260 rxq->rdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDT(rxq->reg_idx));
1261 rxq->rdh_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_RDH(rxq->reg_idx));
1262 rxq->rx_ring_phys_addr = rz->iova;
1263 rxq->rx_ring = (union igc_adv_rx_desc *)rz->addr;
1265 /* Allocate software ring. */
1266 rxq->sw_ring = rte_zmalloc("rxq->sw_ring",
1267 sizeof(struct igc_rx_entry) * nb_desc,
1268 RTE_CACHE_LINE_SIZE);
1269 if (rxq->sw_ring == NULL) {
1270 igc_rx_queue_release(rxq);
1274 PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1275 rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
1277 dev->data->rx_queues[queue_idx] = rxq;
1278 igc_reset_rx_queue(rxq);
1283 /* prepare packets for transmit */
1285 eth_igc_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
1291 for (i = 0; i < nb_pkts; i++) {
1294 /* Check some limitations for TSO in hardware */
1295 if (m->ol_flags & IGC_TX_OFFLOAD_SEG)
1296 if (m->tso_segsz > IGC_TSO_MAX_MSS ||
1297 m->l2_len + m->l3_len + m->l4_len >
1298 IGC_TSO_MAX_HDRLEN) {
1303 if (m->ol_flags & IGC_TX_OFFLOAD_NOTSUP_MASK) {
1304 rte_errno = ENOTSUP;
1308 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1309 ret = rte_validate_tx_offload(m);
1315 ret = rte_net_intel_cksum_prepare(m);
1326 * There are some hardware limitations for TCP segmentation offload (TSO).
1327 * We should check whether the parameters are valid.
1329 static inline uint64_t
1330 check_tso_para(uint64_t ol_req, union igc_tx_offload ol_para)
1332 if (!(ol_req & IGC_TX_OFFLOAD_SEG))
1334 if (ol_para.tso_segsz > IGC_TSO_MAX_MSS || ol_para.l2_len +
1335 ol_para.l3_len + ol_para.l4_len > IGC_TSO_MAX_HDRLEN) {
1336 ol_req &= ~IGC_TX_OFFLOAD_SEG;
1337 ol_req |= PKT_TX_TCP_CKSUM;
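/*
 * In other words, a TSO request whose parameters exceed the hardware
 * limits is quietly downgraded to a plain TCP checksum offload rather
 * than being rejected at this point.
 */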
1343 * Check which hardware context can be used. Use the existing match
1344 * or create a new context descriptor.
1346 static inline uint32_t
1347 what_advctx_update(struct igc_tx_queue *txq, uint64_t flags,
1348 union igc_tx_offload tx_offload)
1350 uint32_t curr = txq->ctx_curr;
1352 /* If it matches the current context */
1353 if (likely(txq->ctx_cache[curr].flags == flags &&
1354 txq->ctx_cache[curr].tx_offload.data ==
1355 (txq->ctx_cache[curr].tx_offload_mask.data &
1356 tx_offload.data))) {
1360 /* Only two contexts in total; check whether it matches the second one */
1362 if (likely(txq->ctx_cache[curr].flags == flags &&
1363 txq->ctx_cache[curr].tx_offload.data ==
1364 (txq->ctx_cache[curr].tx_offload_mask.data &
1365 tx_offload.data))) {
1366 txq->ctx_curr = curr;
1370 /* Mismatch; create a new one */
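/*
 * The hardware provides two context slots (IGC_CTX_0/IGC_CTX_1) and the
 * ctx_cache mirrors them, so an unchanged offload layout can reuse a slot
 * without writing a new context descriptor. A return value >= IGC_CTX_NUM
 * tells the caller that a fresh context descriptor must be queued.
 */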
1375 * This is a separate function to leave room for optimization here.
1376 * Rework is required to use pre-defined values.
1379 igc_set_xmit_ctx(struct igc_tx_queue *txq,
1380 volatile struct igc_adv_tx_context_desc *ctx_txd,
1381 uint64_t ol_flags, union igc_tx_offload tx_offload)
1383 uint32_t type_tucmd_mlhl;
1384 uint32_t mss_l4len_idx;
1386 uint32_t vlan_macip_lens;
1387 union igc_tx_offload tx_offload_mask;
1389 /* Use the previous context */
1391 ctx_curr = txq->ctx_curr;
1393 tx_offload_mask.data = 0;
1394 type_tucmd_mlhl = 0;
1396 /* Specify which HW CTX to upload. */
1397 mss_l4len_idx = (ctx_curr << IGC_ADVTXD_IDX_SHIFT);
1399 if (ol_flags & PKT_TX_VLAN_PKT)
1400 tx_offload_mask.vlan_tci = 0xffff;
1402 /* Check whether TCP segmentation is required for this packet */
1403 if (ol_flags & IGC_TX_OFFLOAD_SEG) {
1404 /* implies IP cksum in IPv4 */
1405 if (ol_flags & PKT_TX_IP_CKSUM)
1406 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4 |
1407 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1409 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV6 |
1410 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1412 if (ol_flags & PKT_TX_TCP_SEG)
1413 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP;
1415 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP;
1417 tx_offload_mask.data |= TX_TSO_CMP_MASK;
1418 mss_l4len_idx |= (uint32_t)tx_offload.tso_segsz <<
1419 IGC_ADVTXD_MSS_SHIFT;
1420 mss_l4len_idx |= (uint32_t)tx_offload.l4_len <<
1421 IGC_ADVTXD_L4LEN_SHIFT;
1422 } else { /* no TSO, check if hardware checksum is needed */
1423 if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
1424 tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
1426 if (ol_flags & PKT_TX_IP_CKSUM)
1427 type_tucmd_mlhl = IGC_ADVTXD_TUCMD_IPV4;
1429 switch (ol_flags & PKT_TX_L4_MASK) {
1430 case PKT_TX_TCP_CKSUM:
1431 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_TCP |
1432 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1433 mss_l4len_idx |= (uint32_t)sizeof(struct rte_tcp_hdr)
1434 << IGC_ADVTXD_L4LEN_SHIFT;
1436 case PKT_TX_UDP_CKSUM:
1437 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_UDP |
1438 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1439 mss_l4len_idx |= (uint32_t)sizeof(struct rte_udp_hdr)
1440 << IGC_ADVTXD_L4LEN_SHIFT;
1442 case PKT_TX_SCTP_CKSUM:
1443 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_SCTP |
1444 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1445 mss_l4len_idx |= (uint32_t)sizeof(struct rte_sctp_hdr)
1446 << IGC_ADVTXD_L4LEN_SHIFT;
1449 type_tucmd_mlhl |= IGC_ADVTXD_TUCMD_L4T_RSV |
1450 IGC_ADVTXD_DTYP_CTXT | IGC_ADVTXD_DCMD_DEXT;
1455 txq->ctx_cache[ctx_curr].flags = ol_flags;
1456 txq->ctx_cache[ctx_curr].tx_offload.data =
1457 tx_offload_mask.data & tx_offload.data;
1458 txq->ctx_cache[ctx_curr].tx_offload_mask = tx_offload_mask;
1460 ctx_txd->type_tucmd_mlhl = rte_cpu_to_le_32(type_tucmd_mlhl);
1461 vlan_macip_lens = (uint32_t)tx_offload.data;
1462 ctx_txd->vlan_macip_lens = rte_cpu_to_le_32(vlan_macip_lens);
1463 ctx_txd->mss_l4len_idx = rte_cpu_to_le_32(mss_l4len_idx);
1464 ctx_txd->u.launch_time = 0;
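/*
 * The context descriptor filled in above occupies one slot in the TX ring
 * and carries the VLAN tag, header lengths and MSS; the data descriptors
 * that follow only reference it through the context index placed in their
 * olinfo_status field, so it is rewritten only when the offload layout
 * changes.
 */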
1467 static inline uint32_t
1468 tx_desc_vlan_flags_to_cmdtype(uint64_t ol_flags)
1471 static uint32_t vlan_cmd[2] = {0, IGC_ADVTXD_DCMD_VLE};
1472 static uint32_t tso_cmd[2] = {0, IGC_ADVTXD_DCMD_TSE};
1473 cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
1474 cmdtype |= tso_cmd[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
1478 static inline uint32_t
1479 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
1481 static const uint32_t l4_olinfo[2] = {0, IGC_ADVTXD_POPTS_TXSM};
1482 static const uint32_t l3_olinfo[2] = {0, IGC_ADVTXD_POPTS_IXSM};
1485 tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
1486 tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
1487 tmp |= l4_olinfo[(ol_flags & IGC_TX_OFFLOAD_SEG) != 0];
1492 igc_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1494 struct igc_tx_queue * const txq = tx_queue;
1495 struct igc_tx_entry * const sw_ring = txq->sw_ring;
1496 struct igc_tx_entry *txe, *txn;
1497 volatile union igc_adv_tx_desc * const txr = txq->tx_ring;
1498 volatile union igc_adv_tx_desc *txd;
1499 struct rte_mbuf *tx_pkt;
1500 struct rte_mbuf *m_seg;
1501 uint64_t buf_dma_addr;
1502 uint32_t olinfo_status;
1503 uint32_t cmd_type_len;
1512 uint32_t new_ctx = 0;
1513 union igc_tx_offload tx_offload = {0};
1515 tx_id = txq->tx_tail;
1516 txe = &sw_ring[tx_id];
1518 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1519 tx_pkt = *tx_pkts++;
1520 pkt_len = tx_pkt->pkt_len;
1522 RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
1525 * The number of descriptors that must be allocated for a
1526 * packet is the number of segments of that packet, plus 1
1527 * Context Descriptor for the VLAN Tag Identifier, if any.
1528 * Determine the last TX descriptor to allocate in the TX ring
1529 * for the packet, starting from the current position (tx_id)
1532 tx_last = (uint16_t)(tx_id + tx_pkt->nb_segs - 1);
1534 ol_flags = tx_pkt->ol_flags;
1535 tx_ol_req = ol_flags & IGC_TX_OFFLOAD_MASK;
1537 /* If a Context Descriptor needs to be built. */
1539 tx_offload.l2_len = tx_pkt->l2_len;
1540 tx_offload.l3_len = tx_pkt->l3_len;
1541 tx_offload.l4_len = tx_pkt->l4_len;
1542 tx_offload.vlan_tci = tx_pkt->vlan_tci;
1543 tx_offload.tso_segsz = tx_pkt->tso_segsz;
1544 tx_ol_req = check_tso_para(tx_ol_req, tx_offload);
1546 new_ctx = what_advctx_update(txq, tx_ol_req,
1548 /* Only allocate a context descriptor if required */
1549 new_ctx = (new_ctx >= IGC_CTX_NUM);
1550 tx_last = (uint16_t)(tx_last + new_ctx);
1552 if (tx_last >= txq->nb_tx_desc)
1553 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
1556 "port_id=%u queue_id=%u pktlen=%u tx_first=%u tx_last=%u",
1557 txq->port_id, txq->queue_id, pkt_len, tx_id, tx_last);
1560 * Check if there are enough free descriptors in the TX ring
1561 * to transmit the next packet.
1562 * This operation is based on the two following rules:
1564 * 1- Only check that the last needed TX descriptor can be
1565 * allocated (by construction, if that descriptor is free,
1566 * all intermediate ones are also free).
1568 * For this purpose, the index of the last TX descriptor
1569 * used for a packet (the "last descriptor" of a packet)
1570 * is recorded in the TX entries (the last one included)
1571 * that are associated with all TX descriptors allocated
1574 * 2- Avoid allocating the last free TX descriptor of the
1575 * ring, in order to never set the TDT register with the
1576 * same value stored in parallel by the NIC in the TDH
1577 * register, which would make the TX engine of the NIC
1578 * enter a deadlock situation.
1580 * By extension, avoid allocating a free descriptor that
1581 * belongs to the last set of free descriptors allocated
1582 * to the same packet previously transmitted.
1586 * The "last descriptor" of the packet that previously used
1587 * the descriptor we now want to allocate as our last one.
1589 tx_end = sw_ring[tx_last].last_id;
1592 * The next descriptor following that "last descriptor" in the
1595 tx_end = sw_ring[tx_end].next_id;
1598 * The "last descriptor" associated with that next descriptor.
1600 tx_end = sw_ring[tx_end].last_id;
1603 * Check that this descriptor is free.
1605 if (!(txr[tx_end].wb.status & IGC_TXD_STAT_DD)) {
1612 * Set common flags of all TX Data Descriptors.
1614 * The following bits must be set in all Data Descriptors:
1615 * - IGC_ADVTXD_DTYP_DATA
1616 * - IGC_ADVTXD_DCMD_DEXT
1618 * The following bits must be set in the first Data Descriptor
1619 * and are ignored in the other ones:
1620 * - IGC_ADVTXD_DCMD_IFCS
1621 * - IGC_ADVTXD_MAC_1588
1622 * - IGC_ADVTXD_DCMD_VLE
1624 * The following bits must only be set in the last Data
1628 * The following bits can be set in any Data Descriptor, but
1629 * are only set in the last Data Descriptor:
1632 cmd_type_len = txq->txd_type |
1633 IGC_ADVTXD_DCMD_IFCS | IGC_ADVTXD_DCMD_DEXT;
1634 if (tx_ol_req & IGC_TX_OFFLOAD_SEG)
1635 pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len +
1637 olinfo_status = (pkt_len << IGC_ADVTXD_PAYLEN_SHIFT);
1640 * Timer 0 should be used for packet timestamping;
1641 * sample the packet timestamp into register 0.
1643 if (ol_flags & PKT_TX_IEEE1588_TMST)
1644 cmd_type_len |= IGC_ADVTXD_MAC_TSTAMP;
1647 /* Setup TX Advanced context descriptor if required */
1649 volatile struct igc_adv_tx_context_desc *
1650 ctx_txd = (volatile struct
1651 igc_adv_tx_context_desc *)&txr[tx_id];
1653 txn = &sw_ring[txe->next_id];
1654 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1656 if (txe->mbuf != NULL) {
1657 rte_pktmbuf_free_seg(txe->mbuf);
1661 igc_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
1664 txe->last_id = tx_last;
1665 tx_id = txe->next_id;
1669 /* Setup the TX Advanced Data Descriptor */
1671 tx_desc_vlan_flags_to_cmdtype(tx_ol_req);
1673 tx_desc_cksum_flags_to_olinfo(tx_ol_req);
1674 olinfo_status |= (uint32_t)txq->ctx_curr <<
1675 IGC_ADVTXD_IDX_SHIFT;
1680 txn = &sw_ring[txe->next_id];
1681 RTE_MBUF_PREFETCH_TO_FREE(txn->mbuf);
1685 if (txe->mbuf != NULL)
1686 rte_pktmbuf_free_seg(txe->mbuf);
1689 /* Set up transmit descriptor */
1690 slen = (uint16_t)m_seg->data_len;
1691 buf_dma_addr = rte_mbuf_data_iova(m_seg);
1692 txd->read.buffer_addr =
1693 rte_cpu_to_le_64(buf_dma_addr);
1694 txd->read.cmd_type_len =
1695 rte_cpu_to_le_32(cmd_type_len | slen);
1696 txd->read.olinfo_status =
1697 rte_cpu_to_le_32(olinfo_status);
1698 txe->last_id = tx_last;
1699 tx_id = txe->next_id;
1701 m_seg = m_seg->next;
1702 } while (m_seg != NULL);
1705 * The last packet data descriptor needs End Of Packet (EOP)
1706 * and Report Status (RS).
1708 txd->read.cmd_type_len |=
1709 rte_cpu_to_le_32(IGC_TXD_CMD_EOP | IGC_TXD_CMD_RS);
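/*
 * The RS bit asks the hardware to write the DD status back once this
 * descriptor has been transmitted; eth_igc_tx_descriptor_status() and
 * eth_igc_tx_done_cleanup() rely on that write-back to know when the
 * associated mbufs can be reclaimed.
 */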
1715 * Set the Transmit Descriptor Tail (TDT).
1717 IGC_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
1718 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1719 txq->port_id, txq->queue_id, tx_id, nb_tx);
1720 txq->tx_tail = tx_id;
1725 int eth_igc_tx_descriptor_status(void *tx_queue, uint16_t offset)
1727 struct igc_tx_queue *txq = tx_queue;
1728 volatile uint32_t *status;
1731 if (unlikely(!txq || offset >= txq->nb_tx_desc))
1734 desc = txq->tx_tail + offset;
1735 if (desc >= txq->nb_tx_desc)
1736 desc -= txq->nb_tx_desc;
1738 status = &txq->tx_ring[desc].wb.status;
1739 if (*status & rte_cpu_to_le_32(IGC_TXD_STAT_DD))
1740 return RTE_ETH_TX_DESC_DONE;
1742 return RTE_ETH_TX_DESC_FULL;
1746 igc_tx_queue_release_mbufs(struct igc_tx_queue *txq)
1750 if (txq->sw_ring != NULL) {
1751 for (i = 0; i < txq->nb_tx_desc; i++) {
1752 if (txq->sw_ring[i].mbuf != NULL) {
1753 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1754 txq->sw_ring[i].mbuf = NULL;
1761 igc_tx_queue_release(struct igc_tx_queue *txq)
1763 igc_tx_queue_release_mbufs(txq);
1764 rte_free(txq->sw_ring);
1768 void eth_igc_tx_queue_release(void *txq)
1771 igc_tx_queue_release(txq);
1775 igc_reset_tx_queue_stat(struct igc_tx_queue *txq)
1780 memset((void *)&txq->ctx_cache, 0,
1781 IGC_CTX_NUM * sizeof(struct igc_advctx_info));
1785 igc_reset_tx_queue(struct igc_tx_queue *txq)
1787 struct igc_tx_entry *txe = txq->sw_ring;
1790 /* Initialize ring entries */
1791 prev = (uint16_t)(txq->nb_tx_desc - 1);
1792 for (i = 0; i < txq->nb_tx_desc; i++) {
1793 volatile union igc_adv_tx_desc *txd = &txq->tx_ring[i];
1795 txd->wb.status = IGC_TXD_STAT_DD;
1798 txe[prev].next_id = i;
1802 txq->txd_type = IGC_ADVTXD_DTYP_DATA;
1803 igc_reset_tx_queue_stat(txq);
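/*
 * Note that the reset above pre-sets IGC_TXD_STAT_DD in every descriptor
 * and links the sw_ring entries into a circular list via next_id, so the
 * first calls to igc_xmit_pkts() see the whole ring as free.
 */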
1807 * Clear all RX/TX queues
1810 igc_dev_clear_queues(struct rte_eth_dev *dev)
1813 struct igc_tx_queue *txq;
1814 struct igc_rx_queue *rxq;
1816 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1817 txq = dev->data->tx_queues[i];
1819 igc_tx_queue_release_mbufs(txq);
1820 igc_reset_tx_queue(txq);
1824 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1825 rxq = dev->data->rx_queues[i];
1827 igc_rx_queue_release_mbufs(rxq);
1828 igc_reset_rx_queue(rxq);
1833 int eth_igc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1834 uint16_t nb_desc, unsigned int socket_id,
1835 const struct rte_eth_txconf *tx_conf)
1837 const struct rte_memzone *tz;
1838 struct igc_tx_queue *txq;
1842 if (nb_desc % IGC_TX_DESCRIPTOR_MULTIPLE != 0 ||
1843 nb_desc > IGC_MAX_TXD || nb_desc < IGC_MIN_TXD) {
1845 "TX-descriptor must be a multiple of %u and between %u and %u, cur: %u",
1846 IGC_TX_DESCRIPTOR_MULTIPLE,
1847 IGC_MAX_TXD, IGC_MIN_TXD, nb_desc);
1851 hw = IGC_DEV_PRIVATE_HW(dev);
1854 * The tx_free_thresh and tx_rs_thresh values are not used in the 2.5G
1857 if (tx_conf->tx_free_thresh != 0)
1859 "The tx_free_thresh parameter is not used for the 2.5G driver");
1860 if (tx_conf->tx_rs_thresh != 0)
1862 "The tx_rs_thresh parameter is not used for the 2.5G driver");
1863 if (tx_conf->tx_thresh.wthresh == 0)
1865 "To improve 2.5G driver performance, consider setting the TX WTHRESH value to 4, 8, or 16.");
1867 /* Free memory prior to re-allocation if needed */
1868 if (dev->data->tx_queues[queue_idx] != NULL) {
1869 igc_tx_queue_release(dev->data->tx_queues[queue_idx]);
1870 dev->data->tx_queues[queue_idx] = NULL;
1873 /* First allocate the tx queue data structure */
1874 txq = rte_zmalloc("ethdev TX queue", sizeof(struct igc_tx_queue),
1875 RTE_CACHE_LINE_SIZE);
1880 * Allocate TX ring hardware descriptors. A memzone large enough to
1881 * handle the maximum ring size is allocated in order to allow for
1882 * resizing in later calls to the queue setup function.
1884 size = sizeof(union igc_adv_tx_desc) * IGC_MAX_TXD;
1885 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx, size,
1886 IGC_ALIGN, socket_id);
1888 igc_tx_queue_release(txq);
1892 txq->nb_tx_desc = nb_desc;
1893 txq->pthresh = tx_conf->tx_thresh.pthresh;
1894 txq->hthresh = tx_conf->tx_thresh.hthresh;
1895 txq->wthresh = tx_conf->tx_thresh.wthresh;
1897 txq->queue_id = queue_idx;
1898 txq->reg_idx = queue_idx;
1899 txq->port_id = dev->data->port_id;
1901 txq->tdt_reg_addr = IGC_PCI_REG_ADDR(hw, IGC_TDT(txq->reg_idx));
1902 txq->tx_ring_phys_addr = tz->iova;
1904 txq->tx_ring = (union igc_adv_tx_desc *)tz->addr;
1905 /* Allocate software ring */
1906 txq->sw_ring = rte_zmalloc("txq->sw_ring",
1907 sizeof(struct igc_tx_entry) * nb_desc,
1908 RTE_CACHE_LINE_SIZE);
1909 if (txq->sw_ring == NULL) {
1910 igc_tx_queue_release(txq);
1913 PMD_DRV_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1914 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1916 igc_reset_tx_queue(txq);
1917 dev->tx_pkt_burst = igc_xmit_pkts;
1918 dev->tx_pkt_prepare = &eth_igc_prep_pkts;
1919 dev->data->tx_queues[queue_idx] = txq;
1920 txq->offloads = tx_conf->offloads;
1926 eth_igc_tx_done_cleanup(void *txqueue, uint32_t free_cnt)
1928 struct igc_tx_queue *txq = txqueue;
1929 struct igc_tx_entry *sw_ring;
1930 volatile union igc_adv_tx_desc *txr;
1931 uint16_t tx_first; /* First segment analyzed. */
1932 uint16_t tx_id; /* Current segment being processed. */
1933 uint16_t tx_last; /* Last segment in the current packet. */
1934 uint16_t tx_next; /* First segment of the next packet. */
1941 sw_ring = txq->sw_ring;
1945 * tx_tail is the last sent packet on the sw_ring. Go to the end
1946 * of that packet (the last segment in the packet chain) and
1947 * then the next segment will be the start of the oldest segment
1948 * in the sw_ring. This is the first packet that will be
1949 * attempted to be freed.
1952 /* Get last segment in most recently added packet. */
1953 tx_first = sw_ring[txq->tx_tail].last_id;
1955 /* Get the next segment, which is the oldest segment in ring. */
1956 tx_first = sw_ring[tx_first].next_id;
1958 /* Set the current index to the first. */
1962 * Loop through each packet. For each packet, verify that an
1963 * mbuf exists and that the last segment is free. If so, free
1967 tx_last = sw_ring[tx_id].last_id;
1969 if (sw_ring[tx_last].mbuf) {
1970 if (!(txr[tx_last].wb.status &
1971 rte_cpu_to_le_32(IGC_TXD_STAT_DD)))
1974 /* Get the start of the next packet. */
1975 tx_next = sw_ring[tx_last].next_id;
1978 * Loop through all segments in a
1982 rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
1983 sw_ring[tx_id].mbuf = NULL;
1984 sw_ring[tx_id].last_id = tx_id;
1986 /* Move to the next segment. */
1987 tx_id = sw_ring[tx_id].next_id;
1988 } while (tx_id != tx_next);
1991 * Increment the number of packets
1995 if (unlikely(count == free_cnt))
1999 * There are multiple reasons to be here:
2000 * 1) All the packets on the ring have been
2001 * freed - tx_id is equal to tx_first
2002 * and some packets have been freed.
2004 * 2) The interface has not sent a ring's worth of
2005 * packets yet, so the segment after tail is
2006 * still empty. Or a previous call to this
2007 * function freed some of the segments but
2008 * not all so there is a hole in the list.
2009 * Hopefully this is a rare case.
2010 * - Walk the list and find the next mbuf. If
2011 * there isn't one, then done.
2013 if (likely(tx_id == tx_first && count != 0))
2017 * Walk the list and find the next mbuf, if any.
2020 /* Move to the next segment. */
2021 tx_id = sw_ring[tx_id].next_id;
2023 if (sw_ring[tx_id].mbuf)
2026 } while (tx_id != tx_first);
2029 * Determine why the previous loop bailed. If there
2030 * is no mbuf, we are done.
2032 if (sw_ring[tx_id].mbuf == NULL)
2041 igc_tx_init(struct rte_eth_dev *dev)
2043 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2048 /* Setup the Base and Length of the Tx Descriptor Rings. */
2049 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2050 struct igc_tx_queue *txq = dev->data->tx_queues[i];
2051 uint64_t bus_addr = txq->tx_ring_phys_addr;
2053 IGC_WRITE_REG(hw, IGC_TDLEN(txq->reg_idx),
2055 sizeof(union igc_adv_tx_desc));
2056 IGC_WRITE_REG(hw, IGC_TDBAH(txq->reg_idx),
2057 (uint32_t)(bus_addr >> 32));
2058 IGC_WRITE_REG(hw, IGC_TDBAL(txq->reg_idx),
2059 (uint32_t)bus_addr);
2061 /* Setup the HW Tx Head and Tail descriptor pointers. */
2062 IGC_WRITE_REG(hw, IGC_TDT(txq->reg_idx), 0);
2063 IGC_WRITE_REG(hw, IGC_TDH(txq->reg_idx), 0);
2065 /* Setup Transmit threshold registers. */
2066 txdctl = ((uint32_t)txq->pthresh << IGC_TXDCTL_PTHRESH_SHIFT) &
2067 IGC_TXDCTL_PTHRESH_MSK;
2068 txdctl |= ((uint32_t)txq->hthresh << IGC_TXDCTL_HTHRESH_SHIFT) &
2069 IGC_TXDCTL_HTHRESH_MSK;
2070 txdctl |= ((uint32_t)txq->wthresh << IGC_TXDCTL_WTHRESH_SHIFT) &
2071 IGC_TXDCTL_WTHRESH_MSK;
2072 txdctl |= IGC_TXDCTL_QUEUE_ENABLE;
2073 IGC_WRITE_REG(hw, IGC_TXDCTL(txq->reg_idx), txdctl);
2076 igc_config_collision_dist(hw);
2078 /* Program the Transmit Control Register. */
2079 tctl = IGC_READ_REG(hw, IGC_TCTL);
2080 tctl &= ~IGC_TCTL_CT;
2081 tctl |= (IGC_TCTL_PSP | IGC_TCTL_RTLC | IGC_TCTL_EN |
2082 ((uint32_t)IGC_COLLISION_THRESHOLD << IGC_CT_SHIFT));
2084 /* This write will effectively turn on the transmit unit. */
2085 IGC_WRITE_REG(hw, IGC_TCTL, tctl);
2089 eth_igc_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2090 struct rte_eth_rxq_info *qinfo)
2092 struct igc_rx_queue *rxq;
2094 rxq = dev->data->rx_queues[queue_id];
2096 qinfo->mp = rxq->mb_pool;
2097 qinfo->scattered_rx = dev->data->scattered_rx;
2098 qinfo->nb_desc = rxq->nb_rx_desc;
2100 qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
2101 qinfo->conf.rx_drop_en = rxq->drop_en;
2102 qinfo->conf.offloads = rxq->offloads;
2103 qinfo->conf.rx_thresh.hthresh = rxq->hthresh;
2104 qinfo->conf.rx_thresh.pthresh = rxq->pthresh;
2105 qinfo->conf.rx_thresh.wthresh = rxq->wthresh;
2109 eth_igc_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
2110 struct rte_eth_txq_info *qinfo)
2112 struct igc_tx_queue *txq;
2114 txq = dev->data->tx_queues[queue_id];
2116 qinfo->nb_desc = txq->nb_tx_desc;
2118 qinfo->conf.tx_thresh.pthresh = txq->pthresh;
2119 qinfo->conf.tx_thresh.hthresh = txq->hthresh;
2120 qinfo->conf.tx_thresh.wthresh = txq->wthresh;
2121 qinfo->conf.offloads = txq->offloads;
2125 eth_igc_vlan_strip_queue_set(struct rte_eth_dev *dev,
2126 uint16_t rx_queue_id, int on)
2128 struct igc_hw *hw = IGC_DEV_PRIVATE_HW(dev);
2129 struct igc_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];
2132 if (rx_queue_id >= IGC_QUEUE_PAIRS_NUM) {
2133 PMD_DRV_LOG(ERR, "Queue index (%u) is illegal, max is %u",
2134 rx_queue_id, IGC_QUEUE_PAIRS_NUM - 1);
2138 reg_val = IGC_READ_REG(hw, IGC_DVMOLR(rx_queue_id));
2140 /* If the VLAN has been stripped off, the CRC is meaningless. */
2141 reg_val |= IGC_DVMOLR_STRVLAN | IGC_DVMOLR_STRCRC;
2142 rxq->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2144 reg_val &= ~(IGC_DVMOLR_STRVLAN | IGC_DVMOLR_HIDVLAN |
2146 rxq->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2149 IGC_WRITE_REG(hw, IGC_DVMOLR(rx_queue_id), reg_val);