1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
16 #include <rte_common.h>
17 #include <rte_cycles.h>
19 #include <rte_debug.h>
20 #include <rte_ethdev.h>
21 #include <rte_ethdev_driver.h>
22 #include <rte_memzone.h>
23 #include <rte_atomic.h>
24 #include <rte_mempool.h>
25 #include <rte_malloc.h>
27 #include <rte_ether.h>
28 #include <rte_prefetch.h>
32 #include <rte_string_fns.h>
33 #include <rte_errno.h>
37 #include "txgbe_logs.h"
38 #include "base/txgbe.h"
39 #include "txgbe_ethdev.h"
40 #include "txgbe_rxtx.h"
42 /* Bit Mask to indicate what bits required for building TX context */
43 static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
52 PKT_TX_OUTER_IP_CKSUM);
54 #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
55 (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
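/*
 * TXGBE_TX_OFFLOAD_NOTSUP_MASK therefore holds every PKT_TX_* offload
 * flag that this driver does not support; txgbe_prep_pkts() rejects any
 * packet carrying one of those bits with ENOTSUP.
 */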
58 * Prefetch a cache line into all cache levels.
60 #define rte_txgbe_prefetch(p) rte_prefetch0(p)
63 txgbe_is_vf(struct rte_eth_dev *dev)
65 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
67 switch (hw->mac.type) {
68 case txgbe_mac_raptor_vf:
75 /*********************************************************************
79 **********************************************************************/
82 * Check for descriptors with their DD bit set and free mbufs.
83 * Return the total number of buffers freed.
85 static __rte_always_inline int
86 txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
88 struct txgbe_tx_entry *txep;
91 struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
93 /* check DD bit on threshold descriptor */
94 status = txq->tx_ring[txq->tx_next_dd].dw3;
95 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
96 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
97 txgbe_set32_masked(txq->tdc_reg_addr,
98 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
103 * first buffer to free from S/W ring is at index
104 * tx_next_dd - (tx_free_thresh-1)
106 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
107 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
108 /* free buffers one at a time */
109 m = rte_pktmbuf_prefree_seg(txep->mbuf);
112 if (unlikely(m == NULL))
115 if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
116 (nb_free > 0 && m->pool != free[0]->pool)) {
117 rte_mempool_put_bulk(free[0]->pool,
118 (void **)free, nb_free);
126 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
128 /* buffers were freed, update counters */
129 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
130 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
131 if (txq->tx_next_dd >= txq->nb_tx_desc)
132 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
134 return txq->tx_free_thresh;
137 /* Populate 4 descriptors with data from 4 mbufs */
139 tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
141 uint64_t buf_dma_addr;
145 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
146 buf_dma_addr = rte_mbuf_data_iova(*pkts);
147 pkt_len = (*pkts)->data_len;
149 /* write data to descriptor */
150 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
151 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
152 TXGBE_TXD_DATLEN(pkt_len));
153 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
155 rte_prefetch0(&(*pkts)->pool);
159 /* Populate 1 descriptor with data from 1 mbuf */
161 tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
163 uint64_t buf_dma_addr;
166 buf_dma_addr = rte_mbuf_data_iova(*pkts);
167 pkt_len = (*pkts)->data_len;
169 /* write data to descriptor */
170 txdp->qw0 = cpu_to_le64(buf_dma_addr);
171 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
172 TXGBE_TXD_DATLEN(pkt_len));
173 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
175 rte_prefetch0(&(*pkts)->pool);
179 * Fill H/W descriptor ring with mbuf data.
180 * Copy mbuf pointers to the S/W ring.
183 txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
186 volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
187 struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
188 const int N_PER_LOOP = 4;
189 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
190 int mainpart, leftover;
194 * Process most of the packets in chunks of N pkts. Any
195 * leftover packets will get processed one at a time.
197 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
198 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
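/*
 * Worked example: nb_pkts = 23 gives mainpart = 20 (handled four
 * descriptors at a time by tx4()) and leftover = 3 (handled one at
 * a time by tx1()).
 */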
199 for (i = 0; i < mainpart; i += N_PER_LOOP) {
200 /* Copy N mbuf pointers to the S/W ring */
201 for (j = 0; j < N_PER_LOOP; ++j)
202 (txep + i + j)->mbuf = *(pkts + i + j);
203 tx4(txdp + i, pkts + i);
206 if (unlikely(leftover > 0)) {
207 for (i = 0; i < leftover; ++i) {
208 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
209 tx1(txdp + mainpart + i, pkts + mainpart + i);
214 static inline uint16_t
215 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
218 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
222 * Begin scanning the H/W ring for done descriptors when the
223 * number of available descriptors drops below tx_free_thresh. For
224 * each done descriptor, free the associated buffer.
226 if (txq->nb_tx_free < txq->tx_free_thresh)
227 txgbe_tx_free_bufs(txq);
229 /* Only use descriptors that are available */
230 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
231 if (unlikely(nb_pkts == 0))
234 /* Use exactly nb_pkts descriptors */
235 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
238 * At this point, we know there are enough descriptors in the
239 * ring to transmit all the packets. This assumes that each
240 * mbuf contains a single segment, and that no new offloads
241 * are expected, which would require a new context descriptor.
245 * See if we're going to wrap-around. If so, handle the top
246 * of the descriptor ring first, then do the bottom. If not,
247 * the processing looks just like the "bottom" part anyway...
249 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
250 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
251 txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
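/*
 * Illustrative example: with nb_tx_desc = 512, tx_tail = 510 and
 * nb_pkts = 4, n = 2 descriptors are filled at the top of the ring
 * before tx_tail wraps to 0 for the remaining 2 packets.
 */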
255 /* Fill H/W descriptor ring with mbuf data */
256 txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
257 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
260 * Check for wrap-around. This would only happen if we used
261 * up to the last descriptor in the ring, no more, no less.
263 if (txq->tx_tail >= txq->nb_tx_desc)
266 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
267 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
268 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
270 /* update tail pointer */
272 txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
278 txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
283 /* If the burst fits within TX_MAX_BURST, transmit it in a single call */
284 if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
285 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
287 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
292 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
293 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
294 nb_tx = (uint16_t)(nb_tx + ret);
295 nb_pkts = (uint16_t)(nb_pkts - ret);
304 txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
305 volatile struct txgbe_tx_ctx_desc *ctx_txd,
306 uint64_t ol_flags, union txgbe_tx_offload tx_offload)
308 union txgbe_tx_offload tx_offload_mask;
309 uint32_t type_tucmd_mlhl;
310 uint32_t mss_l4len_idx;
312 uint32_t vlan_macip_lens;
313 uint32_t tunnel_seed;
315 ctx_idx = txq->ctx_curr;
316 tx_offload_mask.data[0] = 0;
317 tx_offload_mask.data[1] = 0;
319 /* Specify which HW CTX to upload. */
320 mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
321 type_tucmd_mlhl = TXGBE_TXD_CTXT;
323 tx_offload_mask.ptid |= ~0;
324 type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
326 /* check if TCP segmentation required for this packet */
327 if (ol_flags & PKT_TX_TCP_SEG) {
328 tx_offload_mask.l2_len |= ~0;
329 tx_offload_mask.l3_len |= ~0;
330 tx_offload_mask.l4_len |= ~0;
331 tx_offload_mask.tso_segsz |= ~0;
332 mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
333 mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
334 } else { /* no TSO, check if hardware checksum is needed */
335 if (ol_flags & PKT_TX_IP_CKSUM) {
336 tx_offload_mask.l2_len |= ~0;
337 tx_offload_mask.l3_len |= ~0;
340 switch (ol_flags & PKT_TX_L4_MASK) {
341 case PKT_TX_UDP_CKSUM:
343 TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
344 tx_offload_mask.l2_len |= ~0;
345 tx_offload_mask.l3_len |= ~0;
347 case PKT_TX_TCP_CKSUM:
349 TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
350 tx_offload_mask.l2_len |= ~0;
351 tx_offload_mask.l3_len |= ~0;
353 case PKT_TX_SCTP_CKSUM:
355 TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
356 tx_offload_mask.l2_len |= ~0;
357 tx_offload_mask.l3_len |= ~0;
364 vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
366 if (ol_flags & PKT_TX_TUNNEL_MASK) {
367 tx_offload_mask.outer_tun_len |= ~0;
368 tx_offload_mask.outer_l2_len |= ~0;
369 tx_offload_mask.outer_l3_len |= ~0;
370 tx_offload_mask.l2_len |= ~0;
371 tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
372 tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
374 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
375 case PKT_TX_TUNNEL_IPIP:
376 /* for non UDP / GRE tunneling, set to 0b */
378 case PKT_TX_TUNNEL_VXLAN:
379 case PKT_TX_TUNNEL_GENEVE:
380 tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
382 case PKT_TX_TUNNEL_GRE:
383 tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
386 PMD_TX_LOG(ERR, "Tunnel type not supported");
389 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
392 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
395 if (ol_flags & PKT_TX_VLAN_PKT) {
396 tx_offload_mask.vlan_tci |= ~0;
397 vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
400 txq->ctx_cache[ctx_idx].flags = ol_flags;
401 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
402 tx_offload_mask.data[0] & tx_offload.data[0];
403 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
404 tx_offload_mask.data[1] & tx_offload.data[1];
405 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
407 ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
408 ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
409 ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
410 ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
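/*
 * The masked offload values cached above are what allow
 * what_ctx_update() to decide later whether a subsequent packet can
 * reuse this context descriptor instead of building a new one.
 */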
414 * Check which hardware context can be used. Use the existing match
415 * or create a new context descriptor.
417 static inline uint32_t
418 what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
419 union txgbe_tx_offload tx_offload)
421 /* If it matches the currently used context */
422 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
423 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
424 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
425 & tx_offload.data[0])) &&
426 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
427 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
428 & tx_offload.data[1]))))
429 return txq->ctx_curr;
431 /* Otherwise, check whether it matches the next cached context */
433 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
434 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
435 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
436 & tx_offload.data[0])) &&
437 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
438 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
439 & tx_offload.data[1]))))
440 return txq->ctx_curr;
442 /* No match in the cache: a new context descriptor must be built */
443 return TXGBE_CTX_NUM;
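/*
 * A return value of TXGBE_CTX_NUM tells the caller that no cached
 * context matches; txgbe_xmit_pkts() then builds a fresh context
 * descriptor via txgbe_set_xmit_ctx().
 */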
446 static inline uint32_t
447 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
451 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
453 tmp |= TXGBE_TXD_L4CS;
455 if (ol_flags & PKT_TX_IP_CKSUM) {
457 tmp |= TXGBE_TXD_IPCS;
459 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
461 tmp |= TXGBE_TXD_EIPCS;
463 if (ol_flags & PKT_TX_TCP_SEG) {
465 /* implies IPv4 cksum */
466 if (ol_flags & PKT_TX_IPV4)
467 tmp |= TXGBE_TXD_IPCS;
468 tmp |= TXGBE_TXD_L4CS;
470 if (ol_flags & PKT_TX_VLAN_PKT)
476 static inline uint32_t
477 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
479 uint32_t cmdtype = 0;
481 if (ol_flags & PKT_TX_VLAN_PKT)
482 cmdtype |= TXGBE_TXD_VLE;
483 if (ol_flags & PKT_TX_TCP_SEG)
484 cmdtype |= TXGBE_TXD_TSE;
485 if (ol_flags & PKT_TX_MACSEC)
486 cmdtype |= TXGBE_TXD_LINKSEC;
490 static inline uint8_t
491 tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
496 return txgbe_encode_ptype(ptype);
498 /* Only support flags in TXGBE_TX_OFFLOAD_MASK */
499 tun = !!(oflags & PKT_TX_TUNNEL_MASK);
502 ptype = RTE_PTYPE_L2_ETHER;
503 if (oflags & PKT_TX_VLAN)
504 ptype |= RTE_PTYPE_L2_ETHER_VLAN;
507 if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
508 ptype |= RTE_PTYPE_L3_IPV4;
509 else if (oflags & (PKT_TX_OUTER_IPV6))
510 ptype |= RTE_PTYPE_L3_IPV6;
512 if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
513 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
514 else if (oflags & (PKT_TX_IPV6))
515 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
518 switch (oflags & (PKT_TX_L4_MASK)) {
519 case PKT_TX_TCP_CKSUM:
520 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
522 case PKT_TX_UDP_CKSUM:
523 ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
525 case PKT_TX_SCTP_CKSUM:
526 ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
530 if (oflags & PKT_TX_TCP_SEG)
531 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
534 switch (oflags & PKT_TX_TUNNEL_MASK) {
535 case PKT_TX_TUNNEL_VXLAN:
536 ptype |= RTE_PTYPE_L2_ETHER |
538 RTE_PTYPE_TUNNEL_VXLAN;
539 ptype |= RTE_PTYPE_INNER_L2_ETHER;
541 case PKT_TX_TUNNEL_GRE:
542 ptype |= RTE_PTYPE_L2_ETHER |
544 RTE_PTYPE_TUNNEL_GRE;
545 ptype |= RTE_PTYPE_INNER_L2_ETHER;
547 case PKT_TX_TUNNEL_GENEVE:
548 ptype |= RTE_PTYPE_L2_ETHER |
550 RTE_PTYPE_TUNNEL_GENEVE;
551 ptype |= RTE_PTYPE_INNER_L2_ETHER;
553 case PKT_TX_TUNNEL_VXLAN_GPE:
554 ptype |= RTE_PTYPE_L2_ETHER |
556 RTE_PTYPE_TUNNEL_VXLAN_GPE;
557 ptype |= RTE_PTYPE_INNER_L2_ETHER;
559 case PKT_TX_TUNNEL_IPIP:
560 case PKT_TX_TUNNEL_IP:
561 ptype |= RTE_PTYPE_L2_ETHER |
567 return txgbe_encode_ptype(ptype);
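/*
 * Illustrative example: a plain (non-tunnelled) TCP/IPv4 packet with
 * PKT_TX_IP_CKSUM and PKT_TX_TCP_CKSUM set is classified as
 * RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP before
 * being encoded into the hardware packet type id.
 */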
570 #ifndef DEFAULT_TX_FREE_THRESH
571 #define DEFAULT_TX_FREE_THRESH 32
574 /* Reset transmit descriptors after they have been used */
576 txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
578 struct txgbe_tx_entry *sw_ring = txq->sw_ring;
579 volatile struct txgbe_tx_desc *txr = txq->tx_ring;
580 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
581 uint16_t nb_tx_desc = txq->nb_tx_desc;
582 uint16_t desc_to_clean_to;
583 uint16_t nb_tx_to_clean;
586 /* Determine the last descriptor needing to be cleaned */
587 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
588 if (desc_to_clean_to >= nb_tx_desc)
589 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
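/*
 * Illustrative example: with nb_tx_desc = 512, last_desc_cleaned = 500
 * and tx_free_thresh = 32, desc_to_clean_to wraps around to 20 and
 * (ignoring the last_id adjustment below) 32 descriptors are cleaned.
 */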
591 /* Check to make sure the last descriptor to clean is done */
592 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
593 status = txr[desc_to_clean_to].dw3;
594 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
595 PMD_TX_FREE_LOG(DEBUG,
596 "TX descriptor %4u is not done"
597 "(port=%d queue=%d)",
599 txq->port_id, txq->queue_id);
600 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
601 txgbe_set32_masked(txq->tdc_reg_addr,
602 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
603 /* Failed to clean any descriptors, better luck next time */
607 /* Figure out how many descriptors will be cleaned */
608 if (last_desc_cleaned > desc_to_clean_to)
609 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
612 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
615 PMD_TX_FREE_LOG(DEBUG,
616 "Cleaning %4u TX descriptors: %4u to %4u "
617 "(port=%d queue=%d)",
618 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
619 txq->port_id, txq->queue_id);
622 * The last descriptor to clean is done, so that means all the
623 * descriptors from the last descriptor that was cleaned
624 * up to the last descriptor with the RS bit set
625 * are done. Only reset the threshold descriptor.
627 txr[desc_to_clean_to].dw3 = 0;
629 /* Update the txq to reflect the last descriptor that was cleaned */
630 txq->last_desc_cleaned = desc_to_clean_to;
631 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
637 static inline uint8_t
638 txgbe_get_tun_len(struct rte_mbuf *mbuf)
640 struct txgbe_genevehdr genevehdr;
641 const struct txgbe_genevehdr *gh;
644 switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
645 case PKT_TX_TUNNEL_IPIP:
648 case PKT_TX_TUNNEL_VXLAN:
649 case PKT_TX_TUNNEL_VXLAN_GPE:
650 tun_len = sizeof(struct txgbe_udphdr)
651 + sizeof(struct txgbe_vxlanhdr);
653 case PKT_TX_TUNNEL_GRE:
654 tun_len = sizeof(struct txgbe_nvgrehdr);
656 case PKT_TX_TUNNEL_GENEVE:
657 gh = rte_pktmbuf_read(mbuf,
658 mbuf->outer_l2_len + mbuf->outer_l3_len,
659 sizeof(genevehdr), &genevehdr);
660 tun_len = sizeof(struct txgbe_udphdr)
661 + sizeof(struct txgbe_genevehdr)
662 + (gh->opt_len << 2);
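/*
 * The GENEVE opt_len field counts option data in 4-byte words, hence
 * the "<< 2" to convert it to a byte length.
 */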
672 txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
675 struct txgbe_tx_queue *txq;
676 struct txgbe_tx_entry *sw_ring;
677 struct txgbe_tx_entry *txe, *txn;
678 volatile struct txgbe_tx_desc *txr;
679 volatile struct txgbe_tx_desc *txd;
680 struct rte_mbuf *tx_pkt;
681 struct rte_mbuf *m_seg;
682 uint64_t buf_dma_addr;
683 uint32_t olinfo_status;
684 uint32_t cmd_type_len;
695 union txgbe_tx_offload tx_offload;
697 tx_offload.data[0] = 0;
698 tx_offload.data[1] = 0;
700 sw_ring = txq->sw_ring;
702 tx_id = txq->tx_tail;
703 txe = &sw_ring[tx_id];
705 /* Determine if the descriptor ring needs to be cleaned. */
706 if (txq->nb_tx_free < txq->tx_free_thresh)
707 txgbe_xmit_cleanup(txq);
709 rte_prefetch0(&txe->mbuf->pool);
712 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
715 pkt_len = tx_pkt->pkt_len;
718 * Determine how many (if any) context descriptors
719 * are needed for offload functionality.
721 ol_flags = tx_pkt->ol_flags;
723 /* If hardware offload required */
724 tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
726 tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
727 tx_pkt->packet_type);
728 tx_offload.l2_len = tx_pkt->l2_len;
729 tx_offload.l3_len = tx_pkt->l3_len;
730 tx_offload.l4_len = tx_pkt->l4_len;
731 tx_offload.vlan_tci = tx_pkt->vlan_tci;
732 tx_offload.tso_segsz = tx_pkt->tso_segsz;
733 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
734 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
735 tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
737 /* Determine whether a new context must be built or an existing one reused */
738 ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
739 /* Only allocate context descriptor if required */
740 new_ctx = (ctx == TXGBE_CTX_NUM);
745 * Keep track of how many descriptors are used this loop
746 * This will always be the number of segments + the number of
747 * Context descriptors required to transmit the packet
749 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
752 * The number of descriptors that must be allocated for a
753 * packet is the number of segments of that packet, plus 1
754 * Context Descriptor for the hardware offload, if any.
755 * Determine the last TX descriptor to allocate in the TX ring
756 * for the packet, starting from the current position (tx_id)
759 tx_last = (uint16_t)(tx_id + nb_used - 1);
762 if (tx_last >= txq->nb_tx_desc)
763 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
765 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
766 " tx_first=%u tx_last=%u",
767 (uint16_t)txq->port_id,
768 (uint16_t)txq->queue_id,
774 * Make sure there are enough TX descriptors available to
775 * transmit the entire packet.
776 * nb_used better be less than or equal to txq->tx_free_thresh
778 if (nb_used > txq->nb_tx_free) {
779 PMD_TX_FREE_LOG(DEBUG,
780 "Not enough free TX descriptors "
781 "nb_used=%4u nb_free=%4u "
782 "(port=%d queue=%d)",
783 nb_used, txq->nb_tx_free,
784 txq->port_id, txq->queue_id);
786 if (txgbe_xmit_cleanup(txq) != 0) {
787 /* Could not clean any descriptors */
793 /* nb_used better be <= txq->tx_free_thresh */
794 if (unlikely(nb_used > txq->tx_free_thresh)) {
795 PMD_TX_FREE_LOG(DEBUG,
796 "The number of descriptors needed to "
797 "transmit the packet exceeds the "
798 "RS bit threshold. This will impact "
800 "nb_used=%4u nb_free=%4u "
801 "tx_free_thresh=%4u. "
802 "(port=%d queue=%d)",
803 nb_used, txq->nb_tx_free,
805 txq->port_id, txq->queue_id);
807 * Loop here until there are enough TX
808 * descriptors or until the ring cannot be cleaned any further.
811 while (nb_used > txq->nb_tx_free) {
812 if (txgbe_xmit_cleanup(txq) != 0) {
814 * Could not clean any
826 * By now there are enough free TX descriptors to transmit the packet.
831 * Set common flags of all TX Data Descriptors.
833 * The following bits must be set in all Data Descriptors:
834 * - TXGBE_TXD_DTYP_DATA
835 * - TXGBE_TXD_DCMD_DEXT
837 * The following bits must be set in the first Data Descriptor
838 * and are ignored in the other ones:
839 * - TXGBE_TXD_DCMD_IFCS
840 * - TXGBE_TXD_MAC_1588
841 * - TXGBE_TXD_DCMD_VLE
843 * The following bits must only be set in the last Data
845 * - TXGBE_TXD_CMD_EOP
847 * The following bits can be set in any Data Descriptor, but
848 * are only set in the last Data Descriptor:
851 cmd_type_len = TXGBE_TXD_FCS;
855 if (ol_flags & PKT_TX_TCP_SEG) {
856 /* when TSO is on, the paylen in the descriptor is not the
857 * packet length but the TCP payload length
859 pkt_len -= (tx_offload.l2_len +
860 tx_offload.l3_len + tx_offload.l4_len);
862 (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
863 ? tx_offload.outer_l2_len +
864 tx_offload.outer_l3_len : 0;
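/*
 * Illustrative example: a TSO mbuf with pkt_len = 9014 and
 * 14 + 20 + 20 bytes of L2/L3/L4 headers reports a paylen of 8960
 * (only the TCP payload) in the descriptor.
 */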
868 * Setup the TX Advanced Context Descriptor if required
871 volatile struct txgbe_tx_ctx_desc *ctx_txd;
873 ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
876 txn = &sw_ring[txe->next_id];
877 rte_prefetch0(&txn->mbuf->pool);
879 if (txe->mbuf != NULL) {
880 rte_pktmbuf_free_seg(txe->mbuf);
884 txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
887 txe->last_id = tx_last;
888 tx_id = txe->next_id;
893 * Set up the TX Advanced Data Descriptor.
894 * This path is taken whether a new context descriptor
895 * was built or an existing one is reused.
897 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
899 tx_desc_cksum_flags_to_olinfo(ol_flags);
900 olinfo_status |= TXGBE_TXD_IDX(ctx);
903 olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
908 txn = &sw_ring[txe->next_id];
909 rte_prefetch0(&txn->mbuf->pool);
911 if (txe->mbuf != NULL)
912 rte_pktmbuf_free_seg(txe->mbuf);
916 * Set up Transmit Data Descriptor.
918 slen = m_seg->data_len;
919 buf_dma_addr = rte_mbuf_data_iova(m_seg);
920 txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
921 txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
922 txd->dw3 = rte_cpu_to_le_32(olinfo_status);
923 txe->last_id = tx_last;
924 tx_id = txe->next_id;
927 } while (m_seg != NULL);
930 * The last packet data descriptor needs End Of Packet (EOP)
932 cmd_type_len |= TXGBE_TXD_EOP;
933 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
935 txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
943 * Set the Transmit Descriptor Tail (TDT)
945 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
946 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
947 (uint16_t)tx_id, (uint16_t)nb_tx);
948 txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
949 txq->tx_tail = tx_id;
954 /*********************************************************************
958 **********************************************************************/
960 txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
965 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
967 for (i = 0; i < nb_pkts; i++) {
969 ol_flags = m->ol_flags;
972 * Check if packet meets requirements for number of segments
974 * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and non-TSO.
978 if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
983 if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
984 rte_errno = ENOTSUP;
988 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
989 ret = rte_validate_tx_offload(m);
995 ret = rte_net_intel_cksum_prepare(m);
1005 /*********************************************************************
1009 **********************************************************************/
1010 /* @note: fix txgbe_dev_supported_ptypes_get() if any change here. */
1011 static inline uint32_t
1012 txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
1014 uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
1018 return txgbe_decode_ptype(ptid);
1021 static inline uint64_t
1022 txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
1024 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1025 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH,
1026 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH,
1027 PKT_RX_RSS_HASH, 0, 0, 0,
1028 0, 0, 0, PKT_RX_FDIR,
1031 return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1034 static inline uint64_t
1035 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1040 * Check if VLAN present only.
1041 * Do not check whether the L3/L4 Rx checksum was done by the NIC;
1042 * that can be found from the rte_eth_rxmode.offloads flag.
1044 pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
1045 vlan_flags & PKT_RX_VLAN_STRIPPED)
1051 static inline uint64_t
1052 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1054 uint64_t pkt_flags = 0;
1056 /* checksum offload can't be disabled */
1057 if (rx_status & TXGBE_RXD_STAT_IPCS) {
1058 pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
1059 ? PKT_RX_IP_CKSUM_BAD : PKT_RX_IP_CKSUM_GOOD);
1062 if (rx_status & TXGBE_RXD_STAT_L4CS) {
1063 pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
1064 ? PKT_RX_L4_CKSUM_BAD : PKT_RX_L4_CKSUM_GOOD);
1067 if (rx_status & TXGBE_RXD_STAT_EIPCS &&
1068 rx_status & TXGBE_RXD_ERR_EIPCS) {
1069 pkt_flags |= PKT_RX_EIP_CKSUM_BAD;
1076 * LOOK_AHEAD defines how many desc statuses to check beyond the
1077 * current descriptor.
1078 * It must be a pound define for optimal performance.
1079 * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
1080 * function only works with LOOK_AHEAD=8.
1082 #define LOOK_AHEAD 8
1083 #if (LOOK_AHEAD != 8)
1084 #error "PMD TXGBE: LOOK_AHEAD must be 8\n"
1087 txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
1089 volatile struct txgbe_rx_desc *rxdp;
1090 struct txgbe_rx_entry *rxep;
1091 struct rte_mbuf *mb;
1095 uint32_t s[LOOK_AHEAD];
1096 uint32_t pkt_info[LOOK_AHEAD];
1097 int i, j, nb_rx = 0;
1100 /* get references to current descriptor and S/W ring entry */
1101 rxdp = &rxq->rx_ring[rxq->rx_tail];
1102 rxep = &rxq->sw_ring[rxq->rx_tail];
1104 status = rxdp->qw1.lo.status;
1105 /* check to make sure there is at least 1 packet to receive */
1106 if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1110 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1111 * reference packets that are ready to be received.
1113 for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
1114 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1115 /* Read the LOOK_AHEAD descriptor statuses before any other descriptor fields */
1116 for (j = 0; j < LOOK_AHEAD; j++)
1117 s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
1121 /* Compute how many status bits were set */
1122 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1123 (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
1126 for (j = 0; j < nb_dd; j++)
1127 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
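/*
 * Only the first nb_dd consecutive descriptors with DD set are
 * translated into mbufs below; the scan stops at the first descriptor
 * that is not yet done (see the nb_dd != LOOK_AHEAD check).
 */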
1131 /* Translate descriptor info to mbuf format */
1132 for (j = 0; j < nb_dd; ++j) {
1134 pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
1136 mb->data_len = pkt_len;
1137 mb->pkt_len = pkt_len;
1138 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1140 /* convert descriptor fields to rte mbuf flags */
1141 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1143 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1145 txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1146 mb->ol_flags = pkt_flags;
1148 txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1149 rxq->pkt_type_mask);
1151 if (likely(pkt_flags & PKT_RX_RSS_HASH))
1153 rte_le_to_cpu_32(rxdp[j].qw0.dw1);
1154 else if (pkt_flags & PKT_RX_FDIR) {
1155 mb->hash.fdir.hash =
1156 rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
1157 TXGBE_ATR_HASH_MASK;
1159 rte_le_to_cpu_16(rxdp[j].qw0.hi.ipid);
1163 /* Move mbuf pointers from the S/W ring to the stage */
1164 for (j = 0; j < LOOK_AHEAD; ++j)
1165 rxq->rx_stage[i + j] = rxep[j].mbuf;
1167 /* stop if all requested packets could not be received */
1168 if (nb_dd != LOOK_AHEAD)
1172 /* clear software ring entries so we can cleanup correctly */
1173 for (i = 0; i < nb_rx; ++i)
1174 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1180 txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
1182 volatile struct txgbe_rx_desc *rxdp;
1183 struct txgbe_rx_entry *rxep;
1184 struct rte_mbuf *mb;
1189 /* allocate buffers in bulk directly into the S/W ring */
1190 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1191 rxep = &rxq->sw_ring[alloc_idx];
1192 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1193 rxq->rx_free_thresh);
1194 if (unlikely(diag != 0))
1197 rxdp = &rxq->rx_ring[alloc_idx];
1198 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1199 /* populate the static rte mbuf fields */
1202 mb->port = rxq->port_id;
1204 rte_mbuf_refcnt_set(mb, 1);
1205 mb->data_off = RTE_PKTMBUF_HEADROOM;
1207 /* populate the descriptors */
1208 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1209 TXGBE_RXD_HDRADDR(&rxdp[i], 0);
1210 TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1213 /* update state of internal queue structure */
1214 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1215 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1216 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
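/*
 * Worked example: with nb_rx_desc = 512 and rx_free_thresh = 32 the
 * free trigger moves through 31, 63, ..., 511 and then wraps back
 * to 31.
 */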
1222 static inline uint16_t
1223 txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1226 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1229 /* how many packets are ready to return? */
1230 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1232 /* copy mbuf pointers to the application's packet list */
1233 for (i = 0; i < nb_pkts; ++i)
1234 rx_pkts[i] = stage[i];
1236 /* update internal queue state */
1237 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1238 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1243 static inline uint16_t
1244 txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1247 struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
1248 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1251 /* Any previously recv'd pkts will be returned from the Rx stage */
1252 if (rxq->rx_nb_avail)
1253 return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1255 /* Scan the H/W ring for packets to receive */
1256 nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
1258 /* update internal queue state */
1259 rxq->rx_next_avail = 0;
1260 rxq->rx_nb_avail = nb_rx;
1261 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1263 /* if required, allocate new buffers to replenish descriptors */
1264 if (rxq->rx_tail > rxq->rx_free_trigger) {
1265 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1267 if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
1270 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1271 "queue_id=%u", (uint16_t)rxq->port_id,
1272 (uint16_t)rxq->queue_id);
1274 dev->data->rx_mbuf_alloc_failed +=
1275 rxq->rx_free_thresh;
1278 * Need to rewind any previous receives if we cannot
1279 * allocate new buffers to replenish the old ones.
1281 rxq->rx_nb_avail = 0;
1282 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1283 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1284 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1289 /* update tail pointer */
1291 txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1294 if (rxq->rx_tail >= rxq->nb_rx_desc)
1297 /* received any packets this loop? */
1298 if (rxq->rx_nb_avail)
1299 return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1304 /* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
1306 txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1311 if (unlikely(nb_pkts == 0))
1314 if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
1315 return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1317 /* request is relatively large, chunk it up */
1322 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
1323 ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1324 nb_rx = (uint16_t)(nb_rx + ret);
1325 nb_pkts = (uint16_t)(nb_pkts - ret);
1334 txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1337 struct txgbe_rx_queue *rxq;
1338 volatile struct txgbe_rx_desc *rx_ring;
1339 volatile struct txgbe_rx_desc *rxdp;
1340 struct txgbe_rx_entry *sw_ring;
1341 struct txgbe_rx_entry *rxe;
1342 struct rte_mbuf *rxm;
1343 struct rte_mbuf *nmb;
1344 struct txgbe_rx_desc rxd;
1357 rx_id = rxq->rx_tail;
1358 rx_ring = rxq->rx_ring;
1359 sw_ring = rxq->sw_ring;
1360 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1361 while (nb_rx < nb_pkts) {
1363 * The order of operations here is important as the DD status
1364 * bit must not be read after any other descriptor fields.
1365 * rx_ring and rxdp are pointing to volatile data so the order
1366 * of accesses cannot be reordered by the compiler. If they were
1367 * not volatile, they could be reordered which could lead to
1368 * using invalid descriptor fields when read from rxd.
1370 rxdp = &rx_ring[rx_id];
1371 staterr = rxdp->qw1.lo.status;
1372 if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1379 * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
1380 * is likely to be invalid and to be dropped by the various
1381 * validation checks performed by the network stack.
1383 * Allocate a new mbuf to replenish the RX ring descriptor.
1384 * If the allocation fails:
1385 * - arrange for that RX descriptor to be the first one
1386 * being parsed the next time the receive function is
1387 * invoked [on the same queue].
1389 * - Stop parsing the RX ring and return immediately.
1391 * This policy does not drop the packet received in the RX
1392 * descriptor for which the allocation of a new mbuf failed.
1393 * Thus, it allows that packet to be retrieved later if
1394 * mbufs have been freed in the meantime.
1395 * As a side effect, holding RX descriptors instead of
1396 * systematically giving them back to the NIC may lead to
1397 * RX ring exhaustion situations.
1398 * However, the NIC can gracefully prevent such situations
1399 * to happen by sending specific "back-pressure" flow control
1400 * frames to its peer(s).
1402 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1403 "ext_err_stat=0x%08x pkt_len=%u",
1404 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1405 (uint16_t)rx_id, (uint32_t)staterr,
1406 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1408 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1410 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1411 "queue_id=%u", (uint16_t)rxq->port_id,
1412 (uint16_t)rxq->queue_id);
1413 dev->data->rx_mbuf_alloc_failed++;
1418 rxe = &sw_ring[rx_id];
1420 if (rx_id == rxq->nb_rx_desc)
1423 /* Prefetch next mbuf while processing current one. */
1424 rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
1427 * When next RX descriptor is on a cache-line boundary,
1428 * prefetch the next 4 RX descriptors and the next 8 pointers
1431 if ((rx_id & 0x3) == 0) {
1432 rte_txgbe_prefetch(&rx_ring[rx_id]);
1433 rte_txgbe_prefetch(&sw_ring[rx_id]);
1438 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1439 TXGBE_RXD_HDRADDR(rxdp, 0);
1440 TXGBE_RXD_PKTADDR(rxdp, dma_addr);
1443 * Initialize the returned mbuf.
1444 * 1) setup generic mbuf fields:
1445 * - number of segments,
1448 * - RX port identifier.
1449 * 2) integrate hardware offload data, if any:
1450 * - RSS flag & hash,
1451 * - IP checksum flag,
1452 * - VLAN TCI, if any,
1455 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1457 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1458 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1461 rxm->pkt_len = pkt_len;
1462 rxm->data_len = pkt_len;
1463 rxm->port = rxq->port_id;
1465 pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1466 /* Only valid if PKT_RX_VLAN set in pkt_flags */
1467 rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1469 pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1471 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1472 pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1473 rxm->ol_flags = pkt_flags;
1474 rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1475 rxq->pkt_type_mask);
1477 if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
1478 rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
1479 } else if (pkt_flags & PKT_RX_FDIR) {
1480 rxm->hash.fdir.hash =
1481 rte_le_to_cpu_16(rxd.qw0.hi.csum) &
1482 TXGBE_ATR_HASH_MASK;
1483 rxm->hash.fdir.id = rte_le_to_cpu_16(rxd.qw0.hi.ipid);
1486 * Store the mbuf address into the next entry of the array
1487 * of returned packets.
1489 rx_pkts[nb_rx++] = rxm;
1491 rxq->rx_tail = rx_id;
1494 * If the number of free RX descriptors is greater than the RX free
1495 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1497 * Update the RDT with the value of the last processed RX descriptor
1498 * minus 1, to guarantee that the RDT register is never equal to the
1499 * RDH register, which creates a "full" ring situation from the
1500 * hardware point of view...
1502 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1503 if (nb_hold > rxq->rx_free_thresh) {
1504 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1505 "nb_hold=%u nb_rx=%u",
1506 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1507 (uint16_t)rx_id, (uint16_t)nb_hold,
1509 rx_id = (uint16_t)((rx_id == 0) ?
1510 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1511 txgbe_set32(rxq->rdt_reg_addr, rx_id);
1514 rxq->nb_rx_hold = nb_hold;
1519 * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1521 * Fill the following info in the HEAD buffer of the Rx cluster:
1522 * - RX port identifier
1523 * - hardware offload data, if any:
1525 * - IP checksum flag
1526 * - VLAN TCI, if any
1528 * @head HEAD of the packet cluster
1529 * @desc HW descriptor to get data from
1530 * @rxq Pointer to the Rx queue
1533 txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
1534 struct txgbe_rx_queue *rxq, uint32_t staterr)
1539 head->port = rxq->port_id;
1541 /* The vlan_tci field is only valid when PKT_RX_VLAN is
1542 * set in the pkt_flags field.
1544 head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1545 pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1546 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1547 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1548 pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1549 head->ol_flags = pkt_flags;
1550 head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1551 rxq->pkt_type_mask);
1553 if (likely(pkt_flags & PKT_RX_RSS_HASH)) {
1554 head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
1555 } else if (pkt_flags & PKT_RX_FDIR) {
1556 head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
1557 & TXGBE_ATR_HASH_MASK;
1558 head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
1563 * txgbe_recv_pkts_lro - receive handler for the LRO case.
1565 * @rx_queue Rx queue handle
1566 * @rx_pkts table of received packets
1567 * @nb_pkts size of rx_pkts table
1568 * @bulk_alloc if TRUE bulk allocation is used for a HW ring refilling
1570 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1571 * additional ring of txgbe_rsc_entry's that will hold the relevant RSC info.
1573 * We use the same logic as in Linux and in FreeBSD txgbe drivers:
1574 * 1) When non-EOP RSC completion arrives:
1575 * a) Update the HEAD of the current RSC aggregation cluster with the new
1576 * segment's data length.
1577 * b) Set the "next" pointer of the current segment to point to the segment
1578 * at the NEXTP index.
1579 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1580 * in the sw_rsc_ring.
1581 * 2) When EOP arrives we just update the cluster's total length and offload
1582 * flags and deliver the cluster up to the upper layers. In our case - put it
1583 * in the rx_pkts table.
1585 * Returns the number of received packets/clusters (according to the "bulk
1586 * receive" interface).
1588 static inline uint16_t
1589 txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1592 struct txgbe_rx_queue *rxq = rx_queue;
1593 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1594 volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
1595 struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
1596 struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1597 uint16_t rx_id = rxq->rx_tail;
1599 uint16_t nb_hold = rxq->nb_rx_hold;
1600 uint16_t prev_id = rxq->rx_tail;
1602 while (nb_rx < nb_pkts) {
1604 struct txgbe_rx_entry *rxe;
1605 struct txgbe_scattered_rx_entry *sc_entry;
1606 struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
1607 struct txgbe_rx_entry *next_rxe = NULL;
1608 struct rte_mbuf *first_seg;
1609 struct rte_mbuf *rxm;
1610 struct rte_mbuf *nmb = NULL;
1611 struct txgbe_rx_desc rxd;
1614 volatile struct txgbe_rx_desc *rxdp;
1619 * The code in this whole file uses the volatile pointer to
1620 * ensure the read ordering of the status and the rest of the
1621 * descriptor fields (on the compiler level only!!!). This is so
1622 * UGLY - why not to just use the compiler barrier instead? DPDK
1623 * even has the rte_compiler_barrier() for that.
1625 * But most importantly this is just wrong because this doesn't
1626 * ensure memory ordering in a general case at all. For
1627 * instance, DPDK is supposed to work on Power CPUs where
1628 * compiler barrier may just not be enough!
1630 * I tried to write only this function properly to have a
1631 * starting point (as a part of an LRO/RSC series) but the
1632 * compiler cursed at me when I tried to cast away the
1633 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1634 * keeping it the way it is for now.
1636 * The code in this file is broken in so many other places and
1637 * will just not work on a big endian CPU anyway therefore the
1638 * lines below will have to be revisited together with the rest
1642 * - Get rid of "volatile" and let the compiler do its job.
1643 * - Use the proper memory barrier (rte_rmb()) to ensure the
1644 * memory ordering below.
1646 rxdp = &rx_ring[rx_id];
1647 staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1649 if (!(staterr & TXGBE_RXD_STAT_DD))
1654 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1655 "staterr=0x%x data_len=%u",
1656 rxq->port_id, rxq->queue_id, rx_id, staterr,
1657 rte_le_to_cpu_16(rxd.qw1.hi.len));
1660 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1662 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1663 "port_id=%u queue_id=%u",
1664 rxq->port_id, rxq->queue_id);
1666 dev->data->rx_mbuf_alloc_failed++;
1669 } else if (nb_hold > rxq->rx_free_thresh) {
1670 uint16_t next_rdt = rxq->rx_free_trigger;
1672 if (!txgbe_rx_alloc_bufs(rxq, false)) {
1674 txgbe_set32_relaxed(rxq->rdt_reg_addr,
1676 nb_hold -= rxq->rx_free_thresh;
1678 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1679 "port_id=%u queue_id=%u",
1680 rxq->port_id, rxq->queue_id);
1682 dev->data->rx_mbuf_alloc_failed++;
1688 rxe = &sw_ring[rx_id];
1689 eop = staterr & TXGBE_RXD_STAT_EOP;
1691 next_id = rx_id + 1;
1692 if (next_id == rxq->nb_rx_desc)
1695 /* Prefetch next mbuf while processing current one. */
1696 rte_txgbe_prefetch(sw_ring[next_id].mbuf);
1699 * When next RX descriptor is on a cache-line boundary,
1700 * prefetch the next 4 RX descriptors and the next 4 pointers
1703 if ((next_id & 0x3) == 0) {
1704 rte_txgbe_prefetch(&rx_ring[next_id]);
1705 rte_txgbe_prefetch(&sw_ring[next_id]);
1712 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1714 * Update RX descriptor with the physical address of the
1715 * new data buffer of the new allocated mbuf.
1719 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1720 TXGBE_RXD_HDRADDR(rxdp, 0);
1721 TXGBE_RXD_PKTADDR(rxdp, dma);
1727 * Set data length & data buffer address of mbuf.
1729 data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1730 rxm->data_len = data_len;
1735 * Get next descriptor index:
1736 * - For RSC it's in the NEXTP field.
1737 * - For a scattered packet it is simply the following descriptor.
1740 if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
1741 nextp_id = TXGBE_RXD_NEXTP(staterr);
1745 next_sc_entry = &sw_sc_ring[nextp_id];
1746 next_rxe = &sw_ring[nextp_id];
1747 rte_txgbe_prefetch(next_rxe);
1750 sc_entry = &sw_sc_ring[rx_id];
1751 first_seg = sc_entry->fbuf;
1752 sc_entry->fbuf = NULL;
1755 * If this is the first buffer of the received packet,
1756 * set the pointer to the first mbuf of the packet and
1757 * initialize its context.
1758 * Otherwise, update the total length and the number of segments
1759 * of the current scattered packet, and update the pointer to
1760 * the last mbuf of the current packet.
1762 if (first_seg == NULL) {
1764 first_seg->pkt_len = data_len;
1765 first_seg->nb_segs = 1;
1767 first_seg->pkt_len += data_len;
1768 first_seg->nb_segs++;
1775 * If this is not the last buffer of the received packet, update
1776 * the pointer to the first mbuf at the NEXTP entry in the
1777 * sw_sc_ring and continue to parse the RX ring.
1779 if (!eop && next_rxe) {
1780 rxm->next = next_rxe->mbuf;
1781 next_sc_entry->fbuf = first_seg;
1785 /* Initialize the first mbuf of the returned packet */
1786 txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1789 * Deal with the case when HW CRC strip is disabled.
1790 * That can't happen when LRO is enabled, but still could
1791 * happen for scattered RX mode.
1793 first_seg->pkt_len -= rxq->crc_len;
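/*
 * If the CRC ended up entirely inside the last segment, that segment
 * is dropped and the leftover CRC bytes are trimmed from the previous
 * segment; otherwise the CRC is simply trimmed from the last segment's
 * data_len.
 */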
1794 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1795 struct rte_mbuf *lp;
1797 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1800 first_seg->nb_segs--;
1801 lp->data_len -= rxq->crc_len - rxm->data_len;
1803 rte_pktmbuf_free_seg(rxm);
1805 rxm->data_len -= rxq->crc_len;
1808 /* Prefetch data of first segment, if configured to do so. */
1809 rte_packet_prefetch((char *)first_seg->buf_addr +
1810 first_seg->data_off);
1813 * Store the mbuf address into the next entry of the array
1814 * of returned packets.
1816 rx_pkts[nb_rx++] = first_seg;
1820 * Record index of the next RX descriptor to probe.
1822 rxq->rx_tail = rx_id;
1825 * If the number of free RX descriptors is greater than the RX free
1826 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1828 * Update the RDT with the value of the last processed RX descriptor
1829 * minus 1, to guarantee that the RDT register is never equal to the
1830 * RDH register, which creates a "full" ring situation from the
1831 * hardware point of view...
1833 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1834 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1835 "nb_hold=%u nb_rx=%u",
1836 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1839 txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1843 rxq->nb_rx_hold = nb_hold;
1848 txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1851 return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1855 txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1858 return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1862 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
1864 return DEV_RX_OFFLOAD_VLAN_STRIP;
1868 txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
1871 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1872 struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
1874 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
1875 DEV_RX_OFFLOAD_UDP_CKSUM |
1876 DEV_RX_OFFLOAD_TCP_CKSUM |
1877 DEV_RX_OFFLOAD_KEEP_CRC |
1878 DEV_RX_OFFLOAD_JUMBO_FRAME |
1879 DEV_RX_OFFLOAD_VLAN_FILTER |
1880 DEV_RX_OFFLOAD_RSS_HASH |
1881 DEV_RX_OFFLOAD_SCATTER;
1883 if (!txgbe_is_vf(dev))
1884 offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
1885 DEV_RX_OFFLOAD_QINQ_STRIP |
1886 DEV_RX_OFFLOAD_VLAN_EXTEND);
1889 * RSC is only supported by PF devices in a non-SR-IOV mode.
1892 if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
1893 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
1895 if (hw->mac.type == txgbe_mac_raptor)
1896 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
1898 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1903 static void __rte_cold
1904 txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
1908 if (txq->sw_ring != NULL) {
1909 for (i = 0; i < txq->nb_tx_desc; i++) {
1910 if (txq->sw_ring[i].mbuf != NULL) {
1911 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1912 txq->sw_ring[i].mbuf = NULL;
1918 static void __rte_cold
1919 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
1922 txq->sw_ring != NULL)
1923 rte_free(txq->sw_ring);
1926 static void __rte_cold
1927 txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
1929 if (txq != NULL && txq->ops != NULL) {
1930 txq->ops->release_mbufs(txq);
1931 txq->ops->free_swring(txq);
1937 txgbe_dev_tx_queue_release(void *txq)
1939 txgbe_tx_queue_release(txq);
1942 static const struct txgbe_txq_ops def_txq_ops = {
1943 .release_mbufs = txgbe_tx_queue_release_mbufs,
1944 .free_swring = txgbe_tx_free_swring,
1947 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1948 * the queue parameters. Used in tx_queue_setup by primary process and then
1949 * in dev_init by secondary process when attaching to an existing ethdev.
1952 txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
1954 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1955 if (txq->offloads == 0 &&
1956 txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
1957 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1958 dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
1959 dev->tx_pkt_prepare = NULL;
1961 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1963 " - offloads = 0x%" PRIx64,
1966 " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
1967 (unsigned long)txq->tx_free_thresh,
1968 (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
1969 dev->tx_pkt_burst = txgbe_xmit_pkts;
1970 dev->tx_pkt_prepare = txgbe_prep_pkts;
1975 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
1983 txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1985 uint64_t tx_offload_capa;
1988 DEV_TX_OFFLOAD_VLAN_INSERT |
1989 DEV_TX_OFFLOAD_IPV4_CKSUM |
1990 DEV_TX_OFFLOAD_UDP_CKSUM |
1991 DEV_TX_OFFLOAD_TCP_CKSUM |
1992 DEV_TX_OFFLOAD_SCTP_CKSUM |
1993 DEV_TX_OFFLOAD_TCP_TSO |
1994 DEV_TX_OFFLOAD_UDP_TSO |
1995 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1996 DEV_TX_OFFLOAD_IP_TNL_TSO |
1997 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1998 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1999 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
2000 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
2001 DEV_TX_OFFLOAD_MULTI_SEGS;
2003 if (!txgbe_is_vf(dev))
2004 tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
2006 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
2008 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
2010 return tx_offload_capa;
2014 txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2017 unsigned int socket_id,
2018 const struct rte_eth_txconf *tx_conf)
2020 const struct rte_memzone *tz;
2021 struct txgbe_tx_queue *txq;
2022 struct txgbe_hw *hw;
2023 uint16_t tx_free_thresh;
2026 PMD_INIT_FUNC_TRACE();
2027 hw = TXGBE_DEV_HW(dev);
2029 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2032 * Validate number of transmit descriptors.
2033 * It must not exceed hardware maximum, and must be multiple
2036 if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
2037 nb_desc > TXGBE_RING_DESC_MAX ||
2038 nb_desc < TXGBE_RING_DESC_MIN) {
2043 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2044 * descriptors are used or if the number of descriptors required
2045 * to transmit a packet is greater than the number of free TX descriptors.
2047 * One descriptor in the TX ring is used as a sentinel to avoid a
2048 * H/W race condition, hence the maximum threshold constraints.
2049 * When set to zero use default values.
2051 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2052 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2053 if (tx_free_thresh >= (nb_desc - 3)) {
2054 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2055 "TX descriptors minus 3. (tx_free_thresh=%u "
2056 "port=%d queue=%d)",
2057 (unsigned int)tx_free_thresh,
2058 (int)dev->data->port_id, (int)queue_idx);
2062 if ((nb_desc % tx_free_thresh) != 0) {
2063 PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
2064 "number of TX descriptors. (tx_free_thresh=%u "
2065 "port=%d queue=%d)", (unsigned int)tx_free_thresh,
2066 (int)dev->data->port_id, (int)queue_idx);
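/*
 * Example: with a 512-descriptor ring, tx_free_thresh must divide 512
 * and stay below nb_desc - 3, so values such as 32 (the default), 64,
 * 128 or 256 are accepted.
 */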
2070 /* Free memory prior to re-allocation if needed... */
2071 if (dev->data->tx_queues[queue_idx] != NULL) {
2072 txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2073 dev->data->tx_queues[queue_idx] = NULL;
2076 /* First allocate the tx queue data structure */
2077 txq = rte_zmalloc_socket("ethdev TX queue",
2078 sizeof(struct txgbe_tx_queue),
2079 RTE_CACHE_LINE_SIZE, socket_id);
2084 * Allocate TX ring hardware descriptors. A memzone large enough to
2085 * handle the maximum ring size is allocated in order to allow for
2086 * resizing in later calls to the queue setup function.
2088 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2089 sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
2090 TXGBE_ALIGN, socket_id);
2092 txgbe_tx_queue_release(txq);
2096 txq->nb_tx_desc = nb_desc;
2097 txq->tx_free_thresh = tx_free_thresh;
2098 txq->pthresh = tx_conf->tx_thresh.pthresh;
2099 txq->hthresh = tx_conf->tx_thresh.hthresh;
2100 txq->wthresh = tx_conf->tx_thresh.wthresh;
2101 txq->queue_id = queue_idx;
2102 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2103 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2104 txq->port_id = dev->data->port_id;
2105 txq->offloads = offloads;
2106 txq->ops = &def_txq_ops;
2107 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2109 /* Set the tail and config register addresses: a virtual function uses
2110 * the queue index directly, while a PF uses its mapped register index.
2112 if (hw->mac.type == txgbe_mac_raptor_vf) {
2113 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
2114 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
2116 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
2117 TXGBE_TXWP(txq->reg_idx));
2118 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
2119 TXGBE_TXCFG(txq->reg_idx));
2122 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
2123 txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
2125 /* Allocate software ring */
2126 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2127 sizeof(struct txgbe_tx_entry) * nb_desc,
2128 RTE_CACHE_LINE_SIZE, socket_id);
2129 if (txq->sw_ring == NULL) {
2130 txgbe_tx_queue_release(txq);
2133 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2134 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2136 /* set up scalar TX function as appropriate */
2137 txgbe_set_tx_function(dev, txq);
2139 txq->ops->reset(txq);
2141 dev->data->tx_queues[queue_idx] = txq;
2147 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2149 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2150 * in the sw_rsc_ring is not set to NULL but rather points to the next
2151 * mbuf of this RSC aggregation (that has not been completed yet and still
2152 * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
2153 * just free the first "nb_segs" segments of the cluster explicitly by
2154 * calling rte_pktmbuf_free_seg().
2156 * @m scattered cluster head
2158 static void __rte_cold
2159 txgbe_free_sc_cluster(struct rte_mbuf *m)
2161 uint16_t i, nb_segs = m->nb_segs;
2162 struct rte_mbuf *next_seg;
2164 for (i = 0; i < nb_segs; i++) {
2166 rte_pktmbuf_free_seg(m);
2171 static void __rte_cold
2172 txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
2176 if (rxq->sw_ring != NULL) {
2177 for (i = 0; i < rxq->nb_rx_desc; i++) {
2178 if (rxq->sw_ring[i].mbuf != NULL) {
2179 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2180 rxq->sw_ring[i].mbuf = NULL;
2183 if (rxq->rx_nb_avail) {
2184 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2185 struct rte_mbuf *mb;
2187 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2188 rte_pktmbuf_free_seg(mb);
2190 rxq->rx_nb_avail = 0;
2194 if (rxq->sw_sc_ring)
2195 for (i = 0; i < rxq->nb_rx_desc; i++)
2196 if (rxq->sw_sc_ring[i].fbuf) {
2197 txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2198 rxq->sw_sc_ring[i].fbuf = NULL;
2202 static void __rte_cold
2203 txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
2206 txgbe_rx_queue_release_mbufs(rxq);
2207 rte_free(rxq->sw_ring);
2208 rte_free(rxq->sw_sc_ring);
2214 txgbe_dev_rx_queue_release(void *rxq)
2216 txgbe_rx_queue_release(rxq);
2220 * Check if Rx Burst Bulk Alloc function can be used.
2222 * 0: the preconditions are satisfied and the bulk allocation function can be used.
2224 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2225 * function must be used.
2227 static inline int __rte_cold
2228 check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
2233 * Make sure the following pre-conditions are satisfied:
2234 * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
2235 * rxq->rx_free_thresh < rxq->nb_rx_desc
2236 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2237 * Scattered packets are not supported. This should be checked
2238 * outside of this function.
2239 */
2240 if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
2241 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2242 "rxq->rx_free_thresh=%d, "
2243 "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
2244 rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
2245 ret = -EINVAL;
2246 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2247 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2248 "rxq->rx_free_thresh=%d, "
2249 "rxq->nb_rx_desc=%d",
2250 rxq->rx_free_thresh, rxq->nb_rx_desc);
2251 ret = -EINVAL;
2252 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2253 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2254 "rxq->nb_rx_desc=%d, "
2255 "rxq->rx_free_thresh=%d",
2256 rxq->nb_rx_desc, rxq->rx_free_thresh);
2257 ret = -EINVAL;
2258 }
2260 return ret;
2261 }
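/*
 * Illustrative example (not from the driver): assuming the usual
 * RTE_PMD_TXGBE_RX_MAX_BURST of 32, a queue configured with
 * nb_rx_desc = 512 and rx_free_thresh = 32 meets all three checks
 * above (32 >= 32, 32 < 512, 512 % 32 == 0), so the bulk allocation
 * receive path remains allowed for the port.
 */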
2263 /* Reset dynamic txgbe_rx_queue fields back to defaults */
2264 static void __rte_cold
2265 txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
2266 {
2267 static const struct txgbe_rx_desc zeroed_desc = {
2268 {{0}, {0} }, {{0}, {0} } };
2269 unsigned int i;
2270 uint16_t len = rxq->nb_rx_desc;
2272 /*
2273 * By default, the Rx queue setup function allocates enough memory for
2274 * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
2275 * extra memory at the end of the descriptor ring to be zero'd out.
2276 */
2277 if (adapter->rx_bulk_alloc_allowed)
2278 /* zero out extra memory */
2279 len += RTE_PMD_TXGBE_RX_MAX_BURST;
2281 /*
2282 * Zero out HW ring memory. Zero out extra memory at the end of
2283 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2284 * reads extra memory as zeros.
2285 */
2286 for (i = 0; i < len; i++)
2287 rxq->rx_ring[i] = zeroed_desc;
2289 /*
2290 * Initialize extra software ring entries. Space for these extra
2291 * entries is always allocated.
2292 */
2293 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2294 for (i = rxq->nb_rx_desc; i < len; ++i)
2295 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2297 rxq->rx_nb_avail = 0;
2298 rxq->rx_next_avail = 0;
2299 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2300 rxq->rx_tail = 0;
2301 rxq->nb_rx_hold = 0;
2302 rxq->pkt_first_seg = NULL;
2303 rxq->pkt_last_seg = NULL;
2304 }
2306 int __rte_cold
2307 txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2308 uint16_t queue_idx,
2309 uint16_t nb_desc,
2310 unsigned int socket_id,
2311 const struct rte_eth_rxconf *rx_conf,
2312 struct rte_mempool *mp)
2313 {
2314 const struct rte_memzone *rz;
2315 struct txgbe_rx_queue *rxq;
2316 struct txgbe_hw *hw;
2317 uint16_t len;
2318 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2319 uint64_t offloads;
2321 PMD_INIT_FUNC_TRACE();
2322 hw = TXGBE_DEV_HW(dev);
2324 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2326 /*
2327 * Validate number of receive descriptors.
2328 * It must not exceed hardware maximum, and must be a multiple
2329 * of TXGBE_RXD_ALIGN.
2330 */
2331 if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
2332 nb_desc > TXGBE_RING_DESC_MAX ||
2333 nb_desc < TXGBE_RING_DESC_MIN) {
2334 return -EINVAL;
2335 }
2337 /* Free memory prior to re-allocation if needed... */
2338 if (dev->data->rx_queues[queue_idx] != NULL) {
2339 txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2340 dev->data->rx_queues[queue_idx] = NULL;
2341 }
2343 /* First allocate the rx queue data structure */
2344 rxq = rte_zmalloc_socket("ethdev RX queue",
2345 sizeof(struct txgbe_rx_queue),
2346 RTE_CACHE_LINE_SIZE, socket_id);
2347 if (rxq == NULL)
2348 return -ENOMEM;
2349 rxq->mb_pool = mp;
2350 rxq->nb_rx_desc = nb_desc;
2351 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2352 rxq->queue_id = queue_idx;
2353 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2354 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2355 rxq->port_id = dev->data->port_id;
2356 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2357 rxq->crc_len = RTE_ETHER_CRC_LEN;
2358 else
2359 rxq->crc_len = 0;
2360 rxq->drop_en = rx_conf->rx_drop_en;
2361 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2362 rxq->offloads = offloads;
2364 /*
2365 * The packet type in RX descriptor is different for different NICs.
2366 * So set different masks for different NICs.
2367 */
2368 rxq->pkt_type_mask = TXGBE_PTID_MASK;
2370 /*
2371 * Allocate RX ring hardware descriptors. A memzone large enough to
2372 * handle the maximum ring size is allocated in order to allow for
2373 * resizing in later calls to the queue setup function.
2374 */
2375 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2376 RX_RING_SZ, TXGBE_ALIGN, socket_id);
2377 if (rz == NULL) {
2378 txgbe_rx_queue_release(rxq);
2379 return -ENOMEM;
2380 }
2382 /*
2383 * Zero init all the descriptors in the ring.
2384 */
2385 memset(rz->addr, 0, RX_RING_SZ);
2387 /*
2388 * Modified to setup VFRDT for Virtual Function
2389 */
2390 if (hw->mac.type == txgbe_mac_raptor_vf) {
2391 rxq->rdt_reg_addr =
2392 TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
2393 rxq->rdh_reg_addr =
2394 TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
2395 } else {
2396 rxq->rdt_reg_addr =
2397 TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
2398 rxq->rdh_reg_addr =
2399 TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
2400 }
2402 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2403 rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
2405 /*
2406 * Certain constraints must be met in order to use the bulk buffer
2407 * allocation Rx burst function. If any of the Rx queues does not meet
2408 * them, the feature should be disabled for the whole port.
2409 */
2410 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2411 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2412 "preconditions - canceling the feature for "
2413 "the whole port[%d]",
2414 rxq->queue_id, rxq->port_id);
2415 adapter->rx_bulk_alloc_allowed = false;
2416 }
2418 /*
2419 * Allocate software ring. Allow for space at the end of the
2420 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2421 * function does not access an invalid memory region.
2422 */
2423 len = nb_desc;
2424 if (adapter->rx_bulk_alloc_allowed)
2425 len += RTE_PMD_TXGBE_RX_MAX_BURST;
2427 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2428 sizeof(struct txgbe_rx_entry) * len,
2429 RTE_CACHE_LINE_SIZE, socket_id);
2430 if (!rxq->sw_ring) {
2431 txgbe_rx_queue_release(rxq);
2432 return -ENOMEM;
2433 }
2435 /*
2436 * Always allocate even if it's not going to be needed in order to
2437 * simplify the code.
2438 *
2439 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2440 * be requested in txgbe_dev_rx_init(), which is called later from
2441 * dev_start() flow.
2442 */
2443 rxq->sw_sc_ring =
2444 rte_zmalloc_socket("rxq->sw_sc_ring",
2445 sizeof(struct txgbe_scattered_rx_entry) * len,
2446 RTE_CACHE_LINE_SIZE, socket_id);
2447 if (!rxq->sw_sc_ring) {
2448 txgbe_rx_queue_release(rxq);
2449 return -ENOMEM;
2450 }
2452 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2453 "dma_addr=0x%" PRIx64,
2454 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2455 rxq->rx_ring_phys_addr);
2457 dev->data->rx_queues[queue_idx] = rxq;
2459 txgbe_reset_rx_queue(adapter, rxq);
2461 return 0;
2462 }
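/*
 * Illustrative usage sketch (not part of the driver): the Rx setup path
 * above is reached through the generic ethdev API, for example with a
 * hypothetical port_id and mbuf_pool:
 *
 *	struct rte_eth_rxconf rxconf = { .rx_free_thresh = 32 };
 *	int ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *					 &rxconf, mbuf_pool);
 *
 * 512 descriptors with a free threshold of 32 also satisfies the bulk
 * allocation preconditions checked earlier.
 */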
2464 void __rte_cold
2465 txgbe_dev_free_queues(struct rte_eth_dev *dev)
2466 {
2467 unsigned int i;
2469 PMD_INIT_FUNC_TRACE();
2471 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2472 txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
2473 dev->data->rx_queues[i] = NULL;
2474 }
2475 dev->data->nb_rx_queues = 0;
2477 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2478 txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
2479 dev->data->tx_queues[i] = NULL;
2480 }
2481 dev->data->nb_tx_queues = 0;
2482 }
2484 static int __rte_cold
2485 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
2486 {
2487 struct txgbe_rx_entry *rxe = rxq->sw_ring;
2488 uint64_t dma_addr;
2489 unsigned int i;
2491 /* Initialize software ring entries */
2492 for (i = 0; i < rxq->nb_rx_desc; i++) {
2493 volatile struct txgbe_rx_desc *rxd;
2494 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
2496 if (mbuf == NULL) {
2497 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
2498 (unsigned int)rxq->queue_id);
2499 return -ENOMEM;
2500 }
2502 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
2503 mbuf->port = rxq->port_id;
2505 dma_addr =
2506 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
2507 rxd = &rxq->rx_ring[i];
2508 TXGBE_RXD_HDRADDR(rxd, 0);
2509 TXGBE_RXD_PKTADDR(rxd, dma_addr);
2510 rxe[i].mbuf = mbuf;
2511 }
2513 return 0;
2514 }
2516 /**
2517 * txgbe_get_rscctl_maxdesc
2518 *
2519 * @pool Memory pool of the Rx queue
2520 */
2521 static inline uint32_t
2522 txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
2523 {
2524 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
2526 uint16_t maxdesc =
2527 RTE_IPV4_MAX_PKT_LEN /
2528 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
2530 if (maxdesc >= 16)
2531 return TXGBE_RXCFG_RSCMAX_16;
2532 else if (maxdesc >= 8)
2533 return TXGBE_RXCFG_RSCMAX_8;
2534 else if (maxdesc >= 4)
2535 return TXGBE_RXCFG_RSCMAX_4;
2536 else
2537 return TXGBE_RXCFG_RSCMAX_1;
2538 }
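/*
 * Worked example (illustrative, assuming a pool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE, i.e. 2176 bytes of data room, and the
 * default 128-byte headroom):
 *	maxdesc = RTE_IPV4_MAX_PKT_LEN / (2176 - 128)
 *		= 65535 / 2048 = 31,
 * which is >= 16, so TXGBE_RXCFG_RSCMAX_16 is returned. Pools with a
 * smaller data room fall through to the 8/4/1 settings.
 */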
2540 /**
2541 * txgbe_set_rsc - configure RSC related port HW registers
2542 *
2543 * Configures the port's RSC related registers.
2544 *
2547 * Returns 0 in case of success or a non-zero error code
2548 */
2549 static int
2550 txgbe_set_rsc(struct rte_eth_dev *dev)
2551 {
2552 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
2553 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2554 struct rte_eth_dev_info dev_info = { 0 };
2555 bool rsc_capable = false;
2556 uint16_t i;
2557 uint32_t rdrxctl;
2558 uint32_t rfctl;
2561 dev->dev_ops->dev_infos_get(dev, &dev_info);
2562 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
2563 rsc_capable = true;
2565 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
2566 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
2567 "support it");
2568 return -EINVAL;
2569 }
2571 /* RSC global configuration */
2573 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
2574 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
2575 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
2576 "is disabled");
2577 return -EINVAL;
2578 }
2580 rfctl = rd32(hw, TXGBE_PSRCTL);
2581 if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
2582 rfctl &= ~TXGBE_PSRCTL_RSCDIA;
2583 else
2584 rfctl |= TXGBE_PSRCTL_RSCDIA;
2585 wr32(hw, TXGBE_PSRCTL, rfctl);
2587 /* If LRO hasn't been requested - we are done here. */
2588 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
2589 return 0;
2591 /* Set PSRCTL.RSCACK bit */
2592 rdrxctl = rd32(hw, TXGBE_PSRCTL);
2593 rdrxctl |= TXGBE_PSRCTL_RSCACK;
2594 wr32(hw, TXGBE_PSRCTL, rdrxctl);
2596 /* Per-queue RSC configuration */
2597 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2598 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
2599 uint32_t srrctl =
2600 rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2601 uint32_t psrtype =
2602 rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
2603 uint32_t eitr =
2604 rd32(hw, TXGBE_ITR(rxq->reg_idx));
2606 /*
2607 * txgbe PMD doesn't support header-split at the moment.
2608 */
2609 srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
2610 srrctl |= TXGBE_RXCFG_HDRLEN(128);
2612 /*
2613 * TODO: Consider setting the Receive Descriptor Minimum
2614 * Threshold Size for an RSC case. This is not an obviously
2615 * beneficial option but the one worth considering...
2616 */
2618 srrctl |= TXGBE_RXCFG_RSCENA;
2619 srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
2620 srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
2621 psrtype |= TXGBE_POOLRSS_L4HDR;
2623 /*
2624 * RSC: Set ITR interval corresponding to 2K ints/s.
2625 *
2626 * Full-sized RSC aggregations for a 10Gb/s link will
2627 * arrive at about 20K aggregations/s rate.
2628 *
2629 * A 2K ints/s rate means only about 10% of the
2630 * aggregations are closed due to interrupt timer
2631 * expiration when streaming at wire speed.
2632 *
2633 * For a sparse streaming case this setting will yield
2634 * at most 500us latency for a single RSC aggregation.
2635 */
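/*
 * Rough arithmetic behind the figures above (illustrative): a 10Gb/s
 * link carries about 1.25GB/s; with full 64KB aggregations that is
 * roughly 1.25e9 / 65536 ~= 19K aggregations/s. A 2K ints/s timer
 * therefore expires on about 2K / 19K ~= 10% of them, and a single
 * sparse aggregation waits at most 1 / 2000s = 500us for the timer.
 */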
2636 eitr &= ~TXGBE_ITR_IVAL_MASK;
2637 eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
2638 eitr |= TXGBE_ITR_WRDSA;
2640 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
2641 wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
2642 wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
2644 /*
2645 * RSC requires the mapping of the queue to the
2646 * interrupt vector.
2647 */
2648 txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
2649 }
2651 dev->data->lro = 1;
2653 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
2655 return 0;
2656 }
2658 void __rte_cold
2659 txgbe_set_rx_function(struct rte_eth_dev *dev)
2660 {
2661 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2663 /*
2664 * Initialize the appropriate LRO callback.
2665 *
2666 * If all queues satisfy the bulk allocation preconditions
2667 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
2668 * bulk allocation. Otherwise use a single allocation version.
2669 */
2670 if (dev->data->lro) {
2671 if (adapter->rx_bulk_alloc_allowed) {
2672 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
2673 "allocation version");
2674 dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
2675 } else {
2676 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
2677 "allocation version");
2678 dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
2679 }
2680 } else if (dev->data->scattered_rx) {
2681 /*
2682 * Set the non-LRO scattered callback: there are bulk and
2683 * single allocation versions.
2684 */
2685 if (adapter->rx_bulk_alloc_allowed) {
2686 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
2687 "allocation callback (port=%d).",
2688 dev->data->port_id);
2689 dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
2690 } else {
2691 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
2692 "single allocation) "
2693 "Scattered Rx callback "
2694 "(port=%d).",
2695 dev->data->port_id);
2697 dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
2698 }
2699 /*
2700 * Below we set "simple" callbacks according to port/queues parameters.
2701 * If parameters allow we are going to choose between the following
2702 * callbacks:
2703 *    - Bulk Allocation
2704 *    - Single buffer allocation (the simplest one)
2705 */
2706 } else if (adapter->rx_bulk_alloc_allowed) {
2707 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
2708 "satisfied. Rx Burst Bulk Alloc function "
2709 "will be used on port=%d.",
2710 dev->data->port_id);
2712 dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
2713 } else {
2714 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
2715 "satisfied, or Scattered Rx is requested "
2716 "(port=%d).",
2717 dev->data->port_id);
2719 dev->rx_pkt_burst = txgbe_recv_pkts;
2720 }
2721 }
2723 /*
2724 * Initializes Receive Unit.
2725 */
2726 int __rte_cold
2727 txgbe_dev_rx_init(struct rte_eth_dev *dev)
2728 {
2729 struct txgbe_hw *hw;
2730 struct txgbe_rx_queue *rxq;
2731 uint64_t bus_addr;
2732 uint32_t fctrl;
2733 uint32_t hlreg0;
2734 uint32_t srrctl;
2735 uint32_t rdrxctl;
2736 uint32_t rxcsum;
2737 uint16_t buf_size;
2738 uint16_t i;
2739 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
2740 int rc;
2742 PMD_INIT_FUNC_TRACE();
2743 hw = TXGBE_DEV_HW(dev);
2745 /*
2746 * Make sure receives are disabled while setting
2747 * up the RX context (registers, descriptor rings, etc.).
2748 */
2749 wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
2750 wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
2752 /* Enable receipt of broadcasted frames */
2753 fctrl = rd32(hw, TXGBE_PSRCTL);
2754 fctrl |= TXGBE_PSRCTL_BCA;
2755 wr32(hw, TXGBE_PSRCTL, fctrl);
2757 /*
2758 * Configure CRC stripping, if any.
2759 */
2760 hlreg0 = rd32(hw, TXGBE_SECRXCTL);
2761 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2762 hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
2763 else
2764 hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
2765 wr32(hw, TXGBE_SECRXCTL, hlreg0);
2767 /*
2768 * Configure jumbo frame support, if any.
2769 */
2770 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
2771 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
2772 TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
2773 } else {
2774 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
2775 TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
2776 }
2778 /*
2779 * If loopback mode is configured, set LPBK bit.
2780 */
2781 hlreg0 = rd32(hw, TXGBE_PSRCTL);
2782 if (hw->mac.type == txgbe_mac_raptor &&
2783 dev->data->dev_conf.lpbk_mode)
2784 hlreg0 |= TXGBE_PSRCTL_LBENA;
2785 else
2786 hlreg0 &= ~TXGBE_PSRCTL_LBENA;
2788 wr32(hw, TXGBE_PSRCTL, hlreg0);
2790 /*
2791 * Assume no header split and no VLAN strip support
2792 * on any Rx queue first.
2793 */
2794 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
2796 /* Setup RX queues */
2797 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2798 rxq = dev->data->rx_queues[i];
2800 /*
2801 * Reset crc_len in case it was changed after queue setup by a
2802 * call to configure.
2803 */
2804 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2805 rxq->crc_len = RTE_ETHER_CRC_LEN;
2806 else
2807 rxq->crc_len = 0;
2809 /* Setup the Base and Length of the Rx Descriptor Rings */
2810 bus_addr = rxq->rx_ring_phys_addr;
2811 wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
2812 (uint32_t)(bus_addr & BIT_MASK32));
2813 wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
2814 (uint32_t)(bus_addr >> 32));
2815 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
2816 wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
2818 srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
2820 /* Set if packets are dropped when no descriptors available */
2821 if (rxq->drop_en)
2822 srrctl |= TXGBE_RXCFG_DROP;
2824 /*
2825 * Configure the RX buffer size in the PKTLEN field of
2826 * the RXCFG register of the queue.
2827 * The value is in 1 KB resolution. Valid values can be from
2828 * 1 KB to 16 KB.
2829 */
2830 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
2831 RTE_PKTMBUF_HEADROOM);
2832 buf_size = ROUND_UP(buf_size, 0x1 << 10);
2833 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
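/*
 * Illustrative example (not from the driver): for a pool created with
 * RTE_MBUF_DEFAULT_BUF_SIZE (2176 bytes of data room), buf_size is
 * 2176 - 128 = 2048; ROUND_UP to the next 1KB leaves 2048, so PKTLEN
 * programs a 2KB receive buffer for this queue.
 */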
2835 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
2837 /* Add the dual VLAN tag length when checking whether scattered Rx is needed */
2838 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
2839 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
2840 dev->data->scattered_rx = 1;
2841 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
2842 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
2843 }
2845 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
2846 dev->data->scattered_rx = 1;
2848 /*
2849 * Setup the Checksum Register.
2850 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
2851 * Enable IP/L4 checksum computation by hardware if requested to do so.
2852 */
2853 rxcsum = rd32(hw, TXGBE_PSRCTL);
2854 rxcsum |= TXGBE_PSRCTL_PCSD;
2855 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
2856 rxcsum |= TXGBE_PSRCTL_L4CSUM;
2857 else
2858 rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
2860 wr32(hw, TXGBE_PSRCTL, rxcsum);
2862 if (hw->mac.type == txgbe_mac_raptor) {
2863 rdrxctl = rd32(hw, TXGBE_SECRXCTL);
2864 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
2865 rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
2866 else
2867 rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
2868 wr32(hw, TXGBE_SECRXCTL, rdrxctl);
2869 }
2871 rc = txgbe_set_rsc(dev);
2872 if (rc)
2873 return rc;
2875 txgbe_set_rx_function(dev);
2877 return 0;
2878 }
2880 /*
2881 * Initializes Transmit Unit.
2882 */
2883 void __rte_cold
2884 txgbe_dev_tx_init(struct rte_eth_dev *dev)
2885 {
2886 struct txgbe_hw *hw;
2887 struct txgbe_tx_queue *txq;
2888 uint64_t bus_addr;
2889 uint16_t i;
2891 PMD_INIT_FUNC_TRACE();
2892 hw = TXGBE_DEV_HW(dev);
2894 /* Setup the Base and Length of the Tx Descriptor Rings */
2895 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2896 txq = dev->data->tx_queues[i];
2898 bus_addr = txq->tx_ring_phys_addr;
2899 wr32(hw, TXGBE_TXBAL(txq->reg_idx),
2900 (uint32_t)(bus_addr & BIT_MASK32));
2901 wr32(hw, TXGBE_TXBAH(txq->reg_idx),
2902 (uint32_t)(bus_addr >> 32));
2903 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
2904 TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
2905 /* Setup the HW Tx Head and TX Tail descriptor pointers */
2906 wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
2907 wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
2908 }
2909 }
2912 txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
2913 {
2914 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
2915 *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
2916 *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
2917 *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
2918 }
2921 txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
2922 {
2923 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
2924 wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
2925 wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
2926 wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
2927 }
2930 txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
2931 {
2932 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
2933 *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
2934 *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
2935 *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
2936 }
2939 txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
2940 {
2941 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
2942 wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
2943 wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
2944 wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
2945 }
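/*
 * Note (added for clarity): the four helpers above snapshot the ring
 * base low/high and config registers of a queue into a per-queue
 * scratch area in the hw struct, and the store variants write them
 * back with the enable bit cleared, so a restored queue stays disabled
 * until the start path below re-enables it.
 */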
2947 /*
2948 * Start Receive Units for specified queue.
2949 */
2950 int __rte_cold
2951 txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2952 {
2953 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2954 struct txgbe_rx_queue *rxq;
2955 uint32_t rxdctl;
2956 int poll_ms;
2958 PMD_INIT_FUNC_TRACE();
2960 rxq = dev->data->rx_queues[rx_queue_id];
2962 /* Allocate buffers for descriptor rings */
2963 if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
2964 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
2965 rx_queue_id);
2966 return -1;
2967 }
2968 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2969 rxdctl |= TXGBE_RXCFG_ENA;
2970 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
2972 /* Wait until RX Enable ready */
2973 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2974 do {
2975 rte_delay_ms(1);
2976 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2977 } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
2978 if (!poll_ms)
2979 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
2981 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
2982 wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
2983 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
2985 return 0;
2986 }
2988 /*
2989 * Stop Receive Units for specified queue.
2990 */
2991 int __rte_cold
2992 txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2993 {
2994 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2995 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2996 struct txgbe_rx_queue *rxq;
2997 uint32_t rxdctl;
2998 int poll_ms;
3000 PMD_INIT_FUNC_TRACE();
3002 rxq = dev->data->rx_queues[rx_queue_id];
3004 txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
3005 wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
3007 /* Wait until RX Enable bit clear */
3008 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
3009 do {
3010 rte_delay_ms(1);
3011 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
3012 } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
3013 if (!poll_ms)
3014 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
3016 rte_delay_us(RTE_TXGBE_WAIT_100_US);
3017 txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
3019 txgbe_rx_queue_release_mbufs(rxq);
3020 txgbe_reset_rx_queue(adapter, rxq);
3021 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3023 return 0;
3024 }
3026 /*
3027 * Start Transmit Units for specified queue.
3028 */
3029 int __rte_cold
3030 txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3031 {
3032 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3033 struct txgbe_tx_queue *txq;
3034 uint32_t txdctl;
3035 int poll_ms;
3037 PMD_INIT_FUNC_TRACE();
3039 txq = dev->data->tx_queues[tx_queue_id];
3040 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
3042 /* Wait until TX Enable ready */
3043 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
3044 do {
3045 rte_delay_ms(1);
3046 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
3047 } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
3048 if (!poll_ms)
3049 PMD_INIT_LOG(ERR, "Could not enable "
3050 "Tx Queue %d", tx_queue_id);
3053 wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
3054 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
3056 return 0;
3057 }
3059 /*
3060 * Stop Transmit Units for specified queue.
3061 */
3062 int __rte_cold
3063 txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
3064 {
3065 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3066 struct txgbe_tx_queue *txq;
3067 uint32_t txdctl;
3068 uint32_t txtdh, txtdt;
3069 int poll_ms;
3071 PMD_INIT_FUNC_TRACE();
3073 txq = dev->data->tx_queues[tx_queue_id];
3075 /* Wait until TX queue is empty */
3076 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
3077 do {
3078 rte_delay_us(RTE_TXGBE_WAIT_100_US);
3079 txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
3080 txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
3081 } while (--poll_ms && (txtdh != txtdt));
3082 if (!poll_ms)
3083 PMD_INIT_LOG(ERR,
3084 "Tx Queue %d is not empty when stopping.",
3085 tx_queue_id);
3087 txgbe_dev_save_tx_queue(hw, txq->reg_idx);
3088 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
3090 /* Wait until TX Enable bit clear */
3091 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
3092 do {
3093 rte_delay_ms(1);
3094 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
3095 } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
3096 if (!poll_ms)
3097 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
3098 tx_queue_id);
3100 rte_delay_us(RTE_TXGBE_WAIT_100_US);
3101 txgbe_dev_store_tx_queue(hw, txq->reg_idx);
3103 if (txq->ops != NULL) {
3104 txq->ops->release_mbufs(txq);
3105 txq->ops->reset(txq);
3106 }
3107 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
3109 return 0;
3110 }