1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
12 #include <rte_common.h>
13 #include <rte_cycles.h>
15 #include <rte_debug.h>
16 #include <rte_ethdev.h>
17 #include <rte_ethdev_driver.h>
18 #include <rte_memzone.h>
19 #include <rte_mempool.h>
20 #include <rte_malloc.h>
25 #include "txgbe_logs.h"
26 #include "base/txgbe.h"
27 #include "txgbe_ethdev.h"
28 #include "txgbe_rxtx.h"
30 /* Bit mask to indicate which bits are required for building the Tx context */
31 static const u64 TXGBE_TX_OFFLOAD_MASK = (PKT_TX_IP_CKSUM |
40 PKT_TX_OUTER_IP_CKSUM);
42 #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
43 (PKT_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
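/* Any ol_flags bit set in a packet but absent from TXGBE_TX_OFFLOAD_MASK is an
 * offload this PMD cannot perform; txgbe_prep_pkts() rejects such packets
 * using this derived mask.
 */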
46 txgbe_is_vf(struct rte_eth_dev *dev)
48 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
50 switch (hw->mac.type) {
51 case txgbe_mac_raptor_vf:
58 /*********************************************************************
62 **********************************************************************/
65 * Check for descriptors with their DD bit set and free mbufs.
66 * Return the total number of buffers freed.
68 static __rte_always_inline int
69 txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
71 struct txgbe_tx_entry *txep;
74 struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
76 /* check DD bit on threshold descriptor */
77 status = txq->tx_ring[txq->tx_next_dd].dw3;
78 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
79 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
80 txgbe_set32_masked(txq->tdc_reg_addr,
81 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
86 * first buffer to free from S/W ring is at index
87 * tx_next_dd - (tx_free_thresh-1)
89 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
90 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
91 /* free buffers one at a time */
92 m = rte_pktmbuf_prefree_seg(txep->mbuf);
95 if (unlikely(m == NULL))
98 if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
99 (nb_free > 0 && m->pool != free[0]->pool)) {
100 rte_mempool_put_bulk(free[0]->pool,
101 (void **)free, nb_free);
109 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
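/* Note: frees are batched per mempool. The local 'free' array is flushed
 * whenever it fills up or the next mbuf comes from a different pool, and the
 * final put_bulk above drains whatever remains.
 */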
111 /* buffers were freed, update counters */
112 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
113 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
114 if (txq->tx_next_dd >= txq->nb_tx_desc)
115 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
117 return txq->tx_free_thresh;
120 /* Populate 4 descriptors with data from 4 mbufs */
122 tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
124 uint64_t buf_dma_addr;
128 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
129 buf_dma_addr = rte_mbuf_data_iova(*pkts);
130 pkt_len = (*pkts)->data_len;
132 /* write data to descriptor */
133 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
134 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
135 TXGBE_TXD_DATLEN(pkt_len));
136 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
138 rte_prefetch0(&(*pkts)->pool);
142 /* Populate 1 descriptor with data from 1 mbuf */
144 tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
146 uint64_t buf_dma_addr;
149 buf_dma_addr = rte_mbuf_data_iova(*pkts);
150 pkt_len = (*pkts)->data_len;
152 /* write data to descriptor */
153 txdp->qw0 = cpu_to_le64(buf_dma_addr);
154 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
155 TXGBE_TXD_DATLEN(pkt_len));
156 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
158 rte_prefetch0(&(*pkts)->pool);
162 * Fill H/W descriptor ring with mbuf data.
163 * Copy mbuf pointers to the S/W ring.
166 txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
169 volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
170 struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
171 const int N_PER_LOOP = 4;
172 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
173 int mainpart, leftover;
177 * Process most of the packets in chunks of N pkts. Any
178 * leftover packets will get processed one at a time.
180 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
181 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
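/* Example: with N_PER_LOOP = 4 and nb_pkts = 7, mainpart = 4 (7 & ~3) and
 * leftover = 3 (7 & 3), so one tx4() call handles the first four packets and
 * tx1() handles the remaining three.
 */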
182 for (i = 0; i < mainpart; i += N_PER_LOOP) {
183 /* Copy N mbuf pointers to the S/W ring */
184 for (j = 0; j < N_PER_LOOP; ++j)
185 (txep + i + j)->mbuf = *(pkts + i + j);
186 tx4(txdp + i, pkts + i);
189 if (unlikely(leftover > 0)) {
190 for (i = 0; i < leftover; ++i) {
191 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
192 tx1(txdp + mainpart + i, pkts + mainpart + i);
197 static inline uint16_t
198 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
201 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
205 * Begin scanning the H/W ring for done descriptors when the
206 * number of available descriptors drops below tx_free_thresh. For
207 * each done descriptor, free the associated buffer.
209 if (txq->nb_tx_free < txq->tx_free_thresh)
210 txgbe_tx_free_bufs(txq);
212 /* Only use descriptors that are available */
213 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
214 if (unlikely(nb_pkts == 0))
217 /* Use exactly nb_pkts descriptors */
218 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
221 * At this point, we know there are enough descriptors in the
222 * ring to transmit all the packets. This assumes that each
223 * mbuf contains a single segment, and that no new offloads
224 * are expected, which would require a new context descriptor.
228 * See if we're going to wrap-around. If so, handle the top
229 * of the descriptor ring first, then do the bottom. If not,
230 * the processing looks just like the "bottom" part anyway...
232 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
233 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
234 txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
238 /* Fill H/W descriptor ring with mbuf data */
239 txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
240 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
243 * Check for wrap-around. This would only happen if we used
244 * up to the last descriptor in the ring, no more, no less.
246 if (txq->tx_tail >= txq->nb_tx_desc)
249 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
250 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
251 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
253 /* update tail pointer */
255 txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
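/* The TDT write above is the doorbell: it publishes the new tail so the NIC
 * starts fetching the freshly filled descriptors. The relaxed accessor relies
 * on a preceding write barrier to order the descriptor stores before this
 * MMIO write.
 */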
261 txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
266 /* Transmit bursts of up to TX_MAX_BURST packets in a single call */
267 if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
268 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
270 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
275 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
276 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
277 nb_tx = (uint16_t)(nb_tx + ret);
278 nb_pkts = (uint16_t)(nb_pkts - ret);
287 txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
288 volatile struct txgbe_tx_ctx_desc *ctx_txd,
289 uint64_t ol_flags, union txgbe_tx_offload tx_offload)
291 union txgbe_tx_offload tx_offload_mask;
292 uint32_t type_tucmd_mlhl;
293 uint32_t mss_l4len_idx;
295 uint32_t vlan_macip_lens;
296 uint32_t tunnel_seed;
298 ctx_idx = txq->ctx_curr;
299 tx_offload_mask.data[0] = 0;
300 tx_offload_mask.data[1] = 0;
302 /* Specify which HW CTX to upload. */
303 mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
304 type_tucmd_mlhl = TXGBE_TXD_CTXT;
306 tx_offload_mask.ptid |= ~0;
307 type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
309 /* check if TCP segmentation is required for this packet */
310 if (ol_flags & PKT_TX_TCP_SEG) {
311 tx_offload_mask.l2_len |= ~0;
312 tx_offload_mask.l3_len |= ~0;
313 tx_offload_mask.l4_len |= ~0;
314 tx_offload_mask.tso_segsz |= ~0;
315 mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
316 mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
317 } else { /* no TSO, check if hardware checksum is needed */
318 if (ol_flags & PKT_TX_IP_CKSUM) {
319 tx_offload_mask.l2_len |= ~0;
320 tx_offload_mask.l3_len |= ~0;
323 switch (ol_flags & PKT_TX_L4_MASK) {
324 case PKT_TX_UDP_CKSUM:
326 TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
327 tx_offload_mask.l2_len |= ~0;
328 tx_offload_mask.l3_len |= ~0;
330 case PKT_TX_TCP_CKSUM:
332 TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
333 tx_offload_mask.l2_len |= ~0;
334 tx_offload_mask.l3_len |= ~0;
336 case PKT_TX_SCTP_CKSUM:
338 TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
339 tx_offload_mask.l2_len |= ~0;
340 tx_offload_mask.l3_len |= ~0;
347 vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
349 if (ol_flags & PKT_TX_TUNNEL_MASK) {
350 tx_offload_mask.outer_tun_len |= ~0;
351 tx_offload_mask.outer_l2_len |= ~0;
352 tx_offload_mask.outer_l3_len |= ~0;
353 tx_offload_mask.l2_len |= ~0;
354 tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
355 tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
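/* The shifts reflect the units of the descriptor fields: IPLEN and ETUNLEN
 * are expressed in 2-byte words (hence >> 1) while EIPLEN is expressed in
 * 4-byte words (hence >> 2).
 */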
357 switch (ol_flags & PKT_TX_TUNNEL_MASK) {
358 case PKT_TX_TUNNEL_IPIP:
359 /* for non-UDP/GRE tunneling, set to 0b */
361 case PKT_TX_TUNNEL_VXLAN:
362 case PKT_TX_TUNNEL_GENEVE:
363 tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
365 case PKT_TX_TUNNEL_GRE:
366 tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
369 PMD_TX_LOG(ERR, "Tunnel type not supported");
372 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
375 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
378 if (ol_flags & PKT_TX_VLAN_PKT) {
379 tx_offload_mask.vlan_tci |= ~0;
380 vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
383 txq->ctx_cache[ctx_idx].flags = ol_flags;
384 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
385 tx_offload_mask.data[0] & tx_offload.data[0];
386 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
387 tx_offload_mask.data[1] & tx_offload.data[1];
388 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
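/* Caching the masked offload values lets what_ctx_update() recognize
 * subsequent packets with identical offload needs and reuse this context
 * descriptor instead of building a new one.
 */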
390 ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
391 ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
392 ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
393 ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
397 * Check which hardware context can be used. Use the existing match
398 * or create a new context descriptor.
400 static inline uint32_t
401 what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
402 union txgbe_tx_offload tx_offload)
404 /* If it matches the currently-used context */
405 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
406 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
407 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
408 & tx_offload.data[0])) &&
409 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
410 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
411 & tx_offload.data[1]))))
412 return txq->ctx_curr;
414 /* Otherwise, check whether it matches the other cached context */
415 txq->ctx_curr ^= 1;
416 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
417 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
418 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
419 & tx_offload.data[0])) &&
420 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
421 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
422 & tx_offload.data[1]))))
423 return txq->ctx_curr;
425 /* Mismatch: a new context descriptor must be built */
426 return TXGBE_CTX_NUM;
429 static inline uint32_t
430 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
434 if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) {
436 tmp |= TXGBE_TXD_L4CS;
438 if (ol_flags & PKT_TX_IP_CKSUM) {
440 tmp |= TXGBE_TXD_IPCS;
442 if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
444 tmp |= TXGBE_TXD_EIPCS;
446 if (ol_flags & PKT_TX_TCP_SEG) {
448 /* implies IPv4 cksum */
449 if (ol_flags & PKT_TX_IPV4)
450 tmp |= TXGBE_TXD_IPCS;
451 tmp |= TXGBE_TXD_L4CS;
453 if (ol_flags & PKT_TX_VLAN_PKT)
459 static inline uint32_t
460 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
462 uint32_t cmdtype = 0;
464 if (ol_flags & PKT_TX_VLAN_PKT)
465 cmdtype |= TXGBE_TXD_VLE;
466 if (ol_flags & PKT_TX_TCP_SEG)
467 cmdtype |= TXGBE_TXD_TSE;
468 if (ol_flags & PKT_TX_MACSEC)
469 cmdtype |= TXGBE_TXD_LINKSEC;
473 static inline uint8_t
474 tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
479 return txgbe_encode_ptype(ptype);
481 /* Only support flags in TXGBE_TX_OFFLOAD_MASK */
482 tun = !!(oflags & PKT_TX_TUNNEL_MASK);
485 ptype = RTE_PTYPE_L2_ETHER;
486 if (oflags & PKT_TX_VLAN)
487 ptype |= RTE_PTYPE_L2_ETHER_VLAN;
490 if (oflags & (PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM))
491 ptype |= RTE_PTYPE_L3_IPV4;
492 else if (oflags & (PKT_TX_OUTER_IPV6))
493 ptype |= RTE_PTYPE_L3_IPV6;
495 if (oflags & (PKT_TX_IPV4 | PKT_TX_IP_CKSUM))
496 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
497 else if (oflags & (PKT_TX_IPV6))
498 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
501 switch (oflags & (PKT_TX_L4_MASK)) {
502 case PKT_TX_TCP_CKSUM:
503 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
505 case PKT_TX_UDP_CKSUM:
506 ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
508 case PKT_TX_SCTP_CKSUM:
509 ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
513 if (oflags & PKT_TX_TCP_SEG)
514 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
517 switch (oflags & PKT_TX_TUNNEL_MASK) {
518 case PKT_TX_TUNNEL_VXLAN:
519 ptype |= RTE_PTYPE_L2_ETHER |
521 RTE_PTYPE_TUNNEL_VXLAN;
522 ptype |= RTE_PTYPE_INNER_L2_ETHER;
524 case PKT_TX_TUNNEL_GRE:
525 ptype |= RTE_PTYPE_L2_ETHER |
527 RTE_PTYPE_TUNNEL_GRE;
528 ptype |= RTE_PTYPE_INNER_L2_ETHER;
530 case PKT_TX_TUNNEL_GENEVE:
531 ptype |= RTE_PTYPE_L2_ETHER |
533 RTE_PTYPE_TUNNEL_GENEVE;
534 ptype |= RTE_PTYPE_INNER_L2_ETHER;
536 case PKT_TX_TUNNEL_VXLAN_GPE:
537 ptype |= RTE_PTYPE_L2_ETHER |
539 RTE_PTYPE_TUNNEL_VXLAN_GPE;
540 ptype |= RTE_PTYPE_INNER_L2_ETHER;
542 case PKT_TX_TUNNEL_IPIP:
543 case PKT_TX_TUNNEL_IP:
544 ptype |= RTE_PTYPE_L2_ETHER |
550 return txgbe_encode_ptype(ptype);
553 #ifndef DEFAULT_TX_FREE_THRESH
554 #define DEFAULT_TX_FREE_THRESH 32
557 /* Reset transmit descriptors after they have been used */
559 txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
561 struct txgbe_tx_entry *sw_ring = txq->sw_ring;
562 volatile struct txgbe_tx_desc *txr = txq->tx_ring;
563 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
564 uint16_t nb_tx_desc = txq->nb_tx_desc;
565 uint16_t desc_to_clean_to;
566 uint16_t nb_tx_to_clean;
569 /* Determine the last descriptor needing to be cleaned */
570 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
571 if (desc_to_clean_to >= nb_tx_desc)
572 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
574 /* Check to make sure the last descriptor to clean is done */
575 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
576 status = txr[desc_to_clean_to].dw3;
577 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
578 PMD_TX_FREE_LOG(DEBUG,
579 "TX descriptor %4u is not done"
580 "(port=%d queue=%d)",
582 txq->port_id, txq->queue_id);
583 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
584 txgbe_set32_masked(txq->tdc_reg_addr,
585 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
586 /* Failed to clean any descriptors, better luck next time */
590 /* Figure out how many descriptors will be cleaned */
591 if (last_desc_cleaned > desc_to_clean_to)
592 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
595 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
598 PMD_TX_FREE_LOG(DEBUG,
599 "Cleaning %4u TX descriptors: %4u to %4u "
600 "(port=%d queue=%d)",
601 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
602 txq->port_id, txq->queue_id);
605 * The last descriptor to clean is done, so that means all the
606 * descriptors from the last descriptor that was cleaned
607 * up to the last descriptor with the RS bit set
608 * are done. Only reset the threshold descriptor.
610 txr[desc_to_clean_to].dw3 = 0;
612 /* Update the txq to reflect the last descriptor that was cleaned */
613 txq->last_desc_cleaned = desc_to_clean_to;
614 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
620 static inline uint8_t
621 txgbe_get_tun_len(struct rte_mbuf *mbuf)
623 struct txgbe_genevehdr genevehdr;
624 const struct txgbe_genevehdr *gh;
627 switch (mbuf->ol_flags & PKT_TX_TUNNEL_MASK) {
628 case PKT_TX_TUNNEL_IPIP:
631 case PKT_TX_TUNNEL_VXLAN:
632 case PKT_TX_TUNNEL_VXLAN_GPE:
633 tun_len = sizeof(struct txgbe_udphdr)
634 + sizeof(struct txgbe_vxlanhdr);
636 case PKT_TX_TUNNEL_GRE:
637 tun_len = sizeof(struct txgbe_nvgrehdr);
639 case PKT_TX_TUNNEL_GENEVE:
640 gh = rte_pktmbuf_read(mbuf,
641 mbuf->outer_l2_len + mbuf->outer_l3_len,
642 sizeof(genevehdr), &genevehdr);
643 tun_len = sizeof(struct txgbe_udphdr)
644 + sizeof(struct txgbe_genevehdr)
645 + (gh->opt_len << 2);
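/* Geneve carries its option length in 4-byte multiples, hence the
 * 'gh->opt_len << 2' when adding the variable-length options to the fixed
 * UDP + Geneve header size.
 */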
655 txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
658 struct txgbe_tx_queue *txq;
659 struct txgbe_tx_entry *sw_ring;
660 struct txgbe_tx_entry *txe, *txn;
661 volatile struct txgbe_tx_desc *txr;
662 volatile struct txgbe_tx_desc *txd;
663 struct rte_mbuf *tx_pkt;
664 struct rte_mbuf *m_seg;
665 uint64_t buf_dma_addr;
666 uint32_t olinfo_status;
667 uint32_t cmd_type_len;
678 union txgbe_tx_offload tx_offload;
680 tx_offload.data[0] = 0;
681 tx_offload.data[1] = 0;
683 sw_ring = txq->sw_ring;
685 tx_id = txq->tx_tail;
686 txe = &sw_ring[tx_id];
688 /* Determine if the descriptor ring needs to be cleaned. */
689 if (txq->nb_tx_free < txq->tx_free_thresh)
690 txgbe_xmit_cleanup(txq);
692 rte_prefetch0(&txe->mbuf->pool);
695 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
698 pkt_len = tx_pkt->pkt_len;
701 * Determine how many (if any) context descriptors
702 * are needed for offload functionality.
704 ol_flags = tx_pkt->ol_flags;
706 /* If hardware offload required */
707 tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
709 tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
710 tx_pkt->packet_type);
711 tx_offload.l2_len = tx_pkt->l2_len;
712 tx_offload.l3_len = tx_pkt->l3_len;
713 tx_offload.l4_len = tx_pkt->l4_len;
714 tx_offload.vlan_tci = tx_pkt->vlan_tci;
715 tx_offload.tso_segsz = tx_pkt->tso_segsz;
716 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
717 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
718 tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
720 /* Decide whether a new context descriptor is needed or an existing one can be reused */
721 ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
722 /* Only allocate context descriptor if required */
723 new_ctx = (ctx == TXGBE_CTX_NUM);
728 * Keep track of how many descriptors are used this loop.
729 * This will always be the number of segments + the number of
730 * context descriptors required to transmit the packet.
732 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
735 * The number of descriptors that must be allocated for a
736 * packet is the number of segments of that packet, plus 1
737 * Context Descriptor for the hardware offload, if any.
738 * Determine the last TX descriptor to allocate in the TX ring
739 * for the packet, starting from the current position (tx_id)
742 tx_last = (uint16_t)(tx_id + nb_used - 1);
745 if (tx_last >= txq->nb_tx_desc)
746 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
748 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
749 " tx_first=%u tx_last=%u",
750 (uint16_t)txq->port_id,
751 (uint16_t)txq->queue_id,
757 * Make sure there are enough TX descriptors available to
758 * transmit the entire packet.
759 * nb_used better be less than or equal to txq->tx_free_thresh
761 if (nb_used > txq->nb_tx_free) {
762 PMD_TX_FREE_LOG(DEBUG,
763 "Not enough free TX descriptors "
764 "nb_used=%4u nb_free=%4u "
765 "(port=%d queue=%d)",
766 nb_used, txq->nb_tx_free,
767 txq->port_id, txq->queue_id);
769 if (txgbe_xmit_cleanup(txq) != 0) {
770 /* Could not clean any descriptors */
776 /* nb_used better be <= txq->tx_free_thresh */
777 if (unlikely(nb_used > txq->tx_free_thresh)) {
778 PMD_TX_FREE_LOG(DEBUG,
779 "The number of descriptors needed to "
780 "transmit the packet exceeds the "
781 "RS bit threshold. This will impact "
783 "nb_used=%4u nb_free=%4u "
784 "tx_free_thresh=%4u. "
785 "(port=%d queue=%d)",
786 nb_used, txq->nb_tx_free,
788 txq->port_id, txq->queue_id);
790 * Loop here until there are enough TX
791 * descriptors or until the ring cannot be
794 while (nb_used > txq->nb_tx_free) {
795 if (txgbe_xmit_cleanup(txq) != 0) {
797 * Could not clean any
809 * By now there are enough free TX descriptors to transmit
814 * Set common flags of all TX Data Descriptors.
816 * The following bits must be set in all Data Descriptors:
817 * - TXGBE_TXD_DTYP_DATA
818 * - TXGBE_TXD_DCMD_DEXT
820 * The following bits must be set in the first Data Descriptor
821 * and are ignored in the other ones:
822 * - TXGBE_TXD_DCMD_IFCS
823 * - TXGBE_TXD_MAC_1588
824 * - TXGBE_TXD_DCMD_VLE
826 * The following bits must only be set in the last Data
828 * - TXGBE_TXD_CMD_EOP
830 * The following bits can be set in any Data Descriptor, but
831 * are only set in the last Data Descriptor:
834 cmd_type_len = TXGBE_TXD_FCS;
838 if (ol_flags & PKT_TX_TCP_SEG) {
839 /* When TSO is on, the paylen in the descriptor is
840 * not the packet length but the TCP payload length
842 pkt_len -= (tx_offload.l2_len +
843 tx_offload.l3_len + tx_offload.l4_len);
845 (tx_pkt->ol_flags & PKT_TX_TUNNEL_MASK)
846 ? tx_offload.outer_l2_len +
847 tx_offload.outer_l3_len : 0;
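/* Worked example for the adjustment above: a 9014-byte TSO packet with 14B
 * L2, 20B L3 and 20B L4 headers yields pkt_len = 9014 - 54 = 8960, i.e. only
 * the TCP payload is reported; for tunneled packets the outer headers are
 * subtracted as well.
 */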
851 * Setup the TX Advanced Context Descriptor if required
854 volatile struct txgbe_tx_ctx_desc *ctx_txd;
856 ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
859 txn = &sw_ring[txe->next_id];
860 rte_prefetch0(&txn->mbuf->pool);
862 if (txe->mbuf != NULL) {
863 rte_pktmbuf_free_seg(txe->mbuf);
867 txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
870 txe->last_id = tx_last;
871 tx_id = txe->next_id;
876 * Set up the TX Advanced Data Descriptor.
877 * This path is taken regardless of
878 * whether the context descriptor is newly built or reused.
880 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
882 tx_desc_cksum_flags_to_olinfo(ol_flags);
883 olinfo_status |= TXGBE_TXD_IDX(ctx);
886 olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
891 txn = &sw_ring[txe->next_id];
892 rte_prefetch0(&txn->mbuf->pool);
894 if (txe->mbuf != NULL)
895 rte_pktmbuf_free_seg(txe->mbuf);
899 * Set up Transmit Data Descriptor.
901 slen = m_seg->data_len;
902 buf_dma_addr = rte_mbuf_data_iova(m_seg);
903 txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
904 txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
905 txd->dw3 = rte_cpu_to_le_32(olinfo_status);
906 txe->last_id = tx_last;
907 tx_id = txe->next_id;
910 } while (m_seg != NULL);
913 * The last packet data descriptor needs End Of Packet (EOP)
915 cmd_type_len |= TXGBE_TXD_EOP;
916 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
918 txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
926 * Set the Transmit Descriptor Tail (TDT)
928 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
929 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
930 (uint16_t)tx_id, (uint16_t)nb_tx);
931 txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
932 txq->tx_tail = tx_id;
937 /*********************************************************************
941 **********************************************************************/
943 txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
948 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
950 for (i = 0; i < nb_pkts; i++) {
952 ol_flags = m->ol_flags;
955 * Check if packet meets requirements for number of segments
957 * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
961 if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
966 if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
967 rte_errno = ENOTSUP;
971 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
972 ret = rte_validate_tx_offload(m);
978 ret = rte_net_intel_cksum_prepare(m);
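/* Usage sketch (application side, not part of this driver): when the
 * full-featured Tx path is selected, dev->tx_pkt_prepare is non-NULL and
 * applications are expected to run the burst through rte_eth_tx_prepare()
 * before rte_eth_tx_burst(), e.g.:
 *
 *   uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
 *   uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * where port_id, queue_id, pkts and n are the application's own values.
 */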
989 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
991 return DEV_RX_OFFLOAD_VLAN_STRIP;
995 txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
998 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
999 struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
1001 offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
1002 DEV_RX_OFFLOAD_UDP_CKSUM |
1003 DEV_RX_OFFLOAD_TCP_CKSUM |
1004 DEV_RX_OFFLOAD_KEEP_CRC |
1005 DEV_RX_OFFLOAD_JUMBO_FRAME |
1006 DEV_RX_OFFLOAD_VLAN_FILTER |
1007 DEV_RX_OFFLOAD_RSS_HASH |
1008 DEV_RX_OFFLOAD_SCATTER;
1010 if (!txgbe_is_vf(dev))
1011 offloads |= (DEV_RX_OFFLOAD_VLAN_FILTER |
1012 DEV_RX_OFFLOAD_QINQ_STRIP |
1013 DEV_RX_OFFLOAD_VLAN_EXTEND);
1016 * RSC is only supported by PF devices in a non-SR-IOV
1019 if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
1020 offloads |= DEV_RX_OFFLOAD_TCP_LRO;
1022 if (hw->mac.type == txgbe_mac_raptor)
1023 offloads |= DEV_RX_OFFLOAD_MACSEC_STRIP;
1025 offloads |= DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1030 static void __rte_cold
1031 txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
1035 if (txq->sw_ring != NULL) {
1036 for (i = 0; i < txq->nb_tx_desc; i++) {
1037 if (txq->sw_ring[i].mbuf != NULL) {
1038 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
1039 txq->sw_ring[i].mbuf = NULL;
1045 static void __rte_cold
1046 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
1049 txq->sw_ring != NULL)
1050 rte_free(txq->sw_ring);
1053 static void __rte_cold
1054 txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
1056 if (txq != NULL && txq->ops != NULL) {
1057 txq->ops->release_mbufs(txq);
1058 txq->ops->free_swring(txq);
1064 txgbe_dev_tx_queue_release(void *txq)
1066 txgbe_tx_queue_release(txq);
1069 static const struct txgbe_txq_ops def_txq_ops = {
1070 .release_mbufs = txgbe_tx_queue_release_mbufs,
1071 .free_swring = txgbe_tx_free_swring,
1074 /* Takes an ethdev and a queue and sets up the tx function to be used based on
1075 * the queue parameters. Used in tx_queue_setup by primary process and then
1076 * in dev_init by secondary process when attaching to an existing ethdev.
1079 txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
1081 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
1082 if (txq->offloads == 0 &&
1083 txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
1084 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
1085 dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
1086 dev->tx_pkt_prepare = NULL;
1088 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
1090 " - offloads = 0x%" PRIx64,
1093 " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
1094 (unsigned long)txq->tx_free_thresh,
1095 (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
1096 dev->tx_pkt_burst = txgbe_xmit_pkts;
1097 dev->tx_pkt_prepare = txgbe_prep_pkts;
1102 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
1110 txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
1112 uint64_t tx_offload_capa;
1115 DEV_TX_OFFLOAD_VLAN_INSERT |
1116 DEV_TX_OFFLOAD_IPV4_CKSUM |
1117 DEV_TX_OFFLOAD_UDP_CKSUM |
1118 DEV_TX_OFFLOAD_TCP_CKSUM |
1119 DEV_TX_OFFLOAD_SCTP_CKSUM |
1120 DEV_TX_OFFLOAD_TCP_TSO |
1121 DEV_TX_OFFLOAD_UDP_TSO |
1122 DEV_TX_OFFLOAD_UDP_TNL_TSO |
1123 DEV_TX_OFFLOAD_IP_TNL_TSO |
1124 DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
1125 DEV_TX_OFFLOAD_GRE_TNL_TSO |
1126 DEV_TX_OFFLOAD_IPIP_TNL_TSO |
1127 DEV_TX_OFFLOAD_GENEVE_TNL_TSO |
1128 DEV_TX_OFFLOAD_MULTI_SEGS;
1130 if (!txgbe_is_vf(dev))
1131 tx_offload_capa |= DEV_TX_OFFLOAD_QINQ_INSERT;
1133 tx_offload_capa |= DEV_TX_OFFLOAD_MACSEC_INSERT;
1135 tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
1137 return tx_offload_capa;
1141 txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
1144 unsigned int socket_id,
1145 const struct rte_eth_txconf *tx_conf)
1147 const struct rte_memzone *tz;
1148 struct txgbe_tx_queue *txq;
1149 struct txgbe_hw *hw;
1150 uint16_t tx_free_thresh;
1153 PMD_INIT_FUNC_TRACE();
1154 hw = TXGBE_DEV_HW(dev);
1156 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
1159 * Validate number of transmit descriptors.
1160 * It must not exceed hardware maximum, and must be multiple
1163 if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
1164 nb_desc > TXGBE_RING_DESC_MAX ||
1165 nb_desc < TXGBE_RING_DESC_MIN) {
1170 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
1171 * descriptors are used or if the number of descriptors required
1172 * to transmit a packet is greater than the number of free TX
1174 * One descriptor in the TX ring is used as a sentinel to avoid a
1175 * H/W race condition, hence the maximum threshold constraints.
1176 * When set to zero use default values.
1178 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
1179 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
1180 if (tx_free_thresh >= (nb_desc - 3)) {
1181 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
1182 "TX descriptors minus 3. (tx_free_thresh=%u "
1183 "port=%d queue=%d)",
1184 (unsigned int)tx_free_thresh,
1185 (int)dev->data->port_id, (int)queue_idx);
1189 if ((nb_desc % tx_free_thresh) != 0) {
1190 PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
1191 "number of TX descriptors. (tx_free_thresh=%u "
1192 "port=%d queue=%d)", (unsigned int)tx_free_thresh,
1193 (int)dev->data->port_id, (int)queue_idx);
1197 /* Free memory prior to re-allocation if needed... */
1198 if (dev->data->tx_queues[queue_idx] != NULL) {
1199 txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
1200 dev->data->tx_queues[queue_idx] = NULL;
1203 /* First allocate the tx queue data structure */
1204 txq = rte_zmalloc_socket("ethdev TX queue",
1205 sizeof(struct txgbe_tx_queue),
1206 RTE_CACHE_LINE_SIZE, socket_id);
1211 * Allocate TX ring hardware descriptors. A memzone large enough to
1212 * handle the maximum ring size is allocated in order to allow for
1213 * resizing in later calls to the queue setup function.
1215 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
1216 sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
1217 TXGBE_ALIGN, socket_id);
1219 txgbe_tx_queue_release(txq);
1223 txq->nb_tx_desc = nb_desc;
1224 txq->tx_free_thresh = tx_free_thresh;
1225 txq->pthresh = tx_conf->tx_thresh.pthresh;
1226 txq->hthresh = tx_conf->tx_thresh.hthresh;
1227 txq->wthresh = tx_conf->tx_thresh.wthresh;
1228 txq->queue_id = queue_idx;
1229 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1230 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1231 txq->port_id = dev->data->port_id;
1232 txq->offloads = offloads;
1233 txq->ops = &def_txq_ops;
1234 txq->tx_deferred_start = tx_conf->tx_deferred_start;
1236 /* Use the VF register layout for the tail/config pointers
1237 * when a virtual function is detected.
1239 if (hw->mac.type == txgbe_mac_raptor_vf) {
1240 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
1241 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
1243 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
1244 TXGBE_TXWP(txq->reg_idx));
1245 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
1246 TXGBE_TXCFG(txq->reg_idx));
1249 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
1250 txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
1252 /* Allocate software ring */
1253 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
1254 sizeof(struct txgbe_tx_entry) * nb_desc,
1255 RTE_CACHE_LINE_SIZE, socket_id);
1256 if (txq->sw_ring == NULL) {
1257 txgbe_tx_queue_release(txq);
1260 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
1261 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
1263 /* set up scalar TX function as appropriate */
1264 txgbe_set_tx_function(dev, txq);
1266 txq->ops->reset(txq);
1268 dev->data->tx_queues[queue_idx] = txq;
1274 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
1276 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
1277 * in the sw_rsc_ring is not set to NULL but rather points to the next
1278 * mbuf of this RSC aggregation (that has not been completed yet and still
1279 * resides on the HW ring). So, instead of calling rte_pktmbuf_free() we
1280 * will just free the first "nb_segs" segments of the cluster explicitly by
1281 * calling rte_pktmbuf_free_seg() on each.
1283 * @m scattered cluster head
1285 static void __rte_cold
1286 txgbe_free_sc_cluster(struct rte_mbuf *m)
1288 uint16_t i, nb_segs = m->nb_segs;
1289 struct rte_mbuf *next_seg;
1291 for (i = 0; i < nb_segs; i++) {
1293 rte_pktmbuf_free_seg(m);
1298 static void __rte_cold
1299 txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
1303 if (rxq->sw_ring != NULL) {
1304 for (i = 0; i < rxq->nb_rx_desc; i++) {
1305 if (rxq->sw_ring[i].mbuf != NULL) {
1306 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
1307 rxq->sw_ring[i].mbuf = NULL;
1310 if (rxq->rx_nb_avail) {
1311 for (i = 0; i < rxq->rx_nb_avail; ++i) {
1312 struct rte_mbuf *mb;
1314 mb = rxq->rx_stage[rxq->rx_next_avail + i];
1315 rte_pktmbuf_free_seg(mb);
1317 rxq->rx_nb_avail = 0;
1321 if (rxq->sw_sc_ring)
1322 for (i = 0; i < rxq->nb_rx_desc; i++)
1323 if (rxq->sw_sc_ring[i].fbuf) {
1324 txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
1325 rxq->sw_sc_ring[i].fbuf = NULL;
1329 static void __rte_cold
1330 txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
1333 txgbe_rx_queue_release_mbufs(rxq);
1334 rte_free(rxq->sw_ring);
1335 rte_free(rxq->sw_sc_ring);
1341 txgbe_dev_rx_queue_release(void *rxq)
1343 txgbe_rx_queue_release(rxq);
1347 * Check if Rx Burst Bulk Alloc function can be used.
1349 * 0: the preconditions are satisfied and the bulk allocation function
1351 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
1352 * function must be used.
1354 static inline int __rte_cold
1355 check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
1360 * Make sure the following pre-conditions are satisfied:
1361 * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
1362 * rxq->rx_free_thresh < rxq->nb_rx_desc
1363 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
1364 * Scattered packets are not supported. This should be checked
1365 * outside of this function.
1367 if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
1368 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1369 "rxq->rx_free_thresh=%d, "
1370 "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
1371 rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
1373 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
1374 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1375 "rxq->rx_free_thresh=%d, "
1376 "rxq->nb_rx_desc=%d",
1377 rxq->rx_free_thresh, rxq->nb_rx_desc);
1379 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
1380 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
1381 "rxq->nb_rx_desc=%d, "
1382 "rxq->rx_free_thresh=%d",
1383 rxq->nb_rx_desc, rxq->rx_free_thresh);
1390 /* Reset dynamic txgbe_rx_queue fields back to defaults */
1391 static void __rte_cold
1392 txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
1394 static const struct txgbe_rx_desc zeroed_desc = {
1395 {{0}, {0} }, {{0}, {0} } };
1397 uint16_t len = rxq->nb_rx_desc;
1400 * By default, the Rx queue setup function allocates enough memory for
1401 * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
1402 * extra memory at the end of the descriptor ring to be zero'd out.
1404 if (adapter->rx_bulk_alloc_allowed)
1405 /* zero out extra memory */
1406 len += RTE_PMD_TXGBE_RX_MAX_BURST;
1409 * Zero out HW ring memory. Zero out extra memory at the end of
1410 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
1411 * reads extra memory as zeros.
1413 for (i = 0; i < len; i++)
1414 rxq->rx_ring[i] = zeroed_desc;
1417 * initialize extra software ring entries. Space for these extra
1418 * entries is always allocated
1420 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
1421 for (i = rxq->nb_rx_desc; i < len; ++i)
1422 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
1424 rxq->rx_nb_avail = 0;
1425 rxq->rx_next_avail = 0;
1426 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
1428 rxq->nb_rx_hold = 0;
1429 rxq->pkt_first_seg = NULL;
1430 rxq->pkt_last_seg = NULL;
1434 txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
1437 unsigned int socket_id,
1438 const struct rte_eth_rxconf *rx_conf,
1439 struct rte_mempool *mp)
1441 const struct rte_memzone *rz;
1442 struct txgbe_rx_queue *rxq;
1443 struct txgbe_hw *hw;
1445 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
1448 PMD_INIT_FUNC_TRACE();
1449 hw = TXGBE_DEV_HW(dev);
1451 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
1454 * Validate number of receive descriptors.
1455 * It must not exceed hardware maximum, and must be multiple
1458 if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
1459 nb_desc > TXGBE_RING_DESC_MAX ||
1460 nb_desc < TXGBE_RING_DESC_MIN) {
1464 /* Free memory prior to re-allocation if needed... */
1465 if (dev->data->rx_queues[queue_idx] != NULL) {
1466 txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
1467 dev->data->rx_queues[queue_idx] = NULL;
1470 /* First allocate the rx queue data structure */
1471 rxq = rte_zmalloc_socket("ethdev RX queue",
1472 sizeof(struct txgbe_rx_queue),
1473 RTE_CACHE_LINE_SIZE, socket_id);
1477 rxq->nb_rx_desc = nb_desc;
1478 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
1479 rxq->queue_id = queue_idx;
1480 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
1481 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
1482 rxq->port_id = dev->data->port_id;
1483 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1484 rxq->crc_len = RTE_ETHER_CRC_LEN;
1487 rxq->drop_en = rx_conf->rx_drop_en;
1488 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
1489 rxq->offloads = offloads;
1492 * The packet type in RX descriptor is different for different NICs.
1493 * So set different masks for different NICs.
1495 rxq->pkt_type_mask = TXGBE_PTID_MASK;
1498 * Allocate RX ring hardware descriptors. A memzone large enough to
1499 * handle the maximum ring size is allocated in order to allow for
1500 * resizing in later calls to the queue setup function.
1502 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
1503 RX_RING_SZ, TXGBE_ALIGN, socket_id);
1505 txgbe_rx_queue_release(rxq);
1510 * Zero init all the descriptors in the ring.
1512 memset(rz->addr, 0, RX_RING_SZ);
1515 * Use the VF head/tail register addresses when running as a Virtual Function.
1517 if (hw->mac.type == txgbe_mac_raptor_vf) {
1519 TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
1521 TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
1524 TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
1526 TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
1529 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
1530 rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
1533 * Certain constraints must be met in order to use the bulk buffer
1534 * allocation Rx burst function. If any of Rx queues doesn't meet them
1535 * the feature should be disabled for the whole port.
1537 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
1538 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
1539 "preconditions - canceling the feature for "
1540 "the whole port[%d]",
1541 rxq->queue_id, rxq->port_id);
1542 adapter->rx_bulk_alloc_allowed = false;
1546 * Allocate software ring. Allow for space at the end of the
1547 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
1548 * function does not access an invalid memory region.
1551 if (adapter->rx_bulk_alloc_allowed)
1552 len += RTE_PMD_TXGBE_RX_MAX_BURST;
1554 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
1555 sizeof(struct txgbe_rx_entry) * len,
1556 RTE_CACHE_LINE_SIZE, socket_id);
1557 if (!rxq->sw_ring) {
1558 txgbe_rx_queue_release(rxq);
1563 * Always allocate even if it's not going to be needed in order to
1564 * simplify the code.
1566 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
1567 * be requested in txgbe_dev_rx_init(), which is called later from
1571 rte_zmalloc_socket("rxq->sw_sc_ring",
1572 sizeof(struct txgbe_scattered_rx_entry) * len,
1573 RTE_CACHE_LINE_SIZE, socket_id);
1574 if (!rxq->sw_sc_ring) {
1575 txgbe_rx_queue_release(rxq);
1579 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
1580 "dma_addr=0x%" PRIx64,
1581 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
1582 rxq->rx_ring_phys_addr);
1584 dev->data->rx_queues[queue_idx] = rxq;
1586 txgbe_reset_rx_queue(adapter, rxq);
1592 txgbe_dev_free_queues(struct rte_eth_dev *dev)
1596 PMD_INIT_FUNC_TRACE();
1598 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1599 txgbe_dev_rx_queue_release(dev->data->rx_queues[i]);
1600 dev->data->rx_queues[i] = NULL;
1602 dev->data->nb_rx_queues = 0;
1604 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1605 txgbe_dev_tx_queue_release(dev->data->tx_queues[i]);
1606 dev->data->tx_queues[i] = NULL;
1608 dev->data->nb_tx_queues = 0;
1612 txgbe_set_rx_function(struct rte_eth_dev *dev)
1617 static int __rte_cold
1618 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
1620 struct txgbe_rx_entry *rxe = rxq->sw_ring;
1624 /* Initialize software ring entries */
1625 for (i = 0; i < rxq->nb_rx_desc; i++) {
1626 volatile struct txgbe_rx_desc *rxd;
1627 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
1630 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
1631 (unsigned int)rxq->queue_id);
1635 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
1636 mbuf->port = rxq->port_id;
1639 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
1640 rxd = &rxq->rx_ring[i];
1641 TXGBE_RXD_HDRADDR(rxd, 0);
1642 TXGBE_RXD_PKTADDR(rxd, dma_addr);
1650 * txgbe_get_rscctl_maxdesc
1652 * @pool Memory pool of the Rx queue
1654 static inline uint32_t
1655 txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
1657 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
1660 RTE_IPV4_MAX_PKT_LEN /
1661 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
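/* maxdesc estimates how many receive buffers from this pool a maximum-sized
 * IPv4 packet (RTE_IPV4_MAX_PKT_LEN) would occupy when aggregated; it is
 * then clamped to the nearest RSCMAX setting the hardware supports
 * (16/8/4/1).
 */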
1664 return TXGBE_RXCFG_RSCMAX_16;
1665 else if (maxdesc >= 8)
1666 return TXGBE_RXCFG_RSCMAX_8;
1667 else if (maxdesc >= 4)
1668 return TXGBE_RXCFG_RSCMAX_4;
1670 return TXGBE_RXCFG_RSCMAX_1;
1674 * txgbe_set_rsc - configure RSC related port HW registers
1676 * Configures the port's RSC related registers.
1680 * Returns 0 in case of success or a non-zero error code
1683 txgbe_set_rsc(struct rte_eth_dev *dev)
1685 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1686 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1687 struct rte_eth_dev_info dev_info = { 0 };
1688 bool rsc_capable = false;
1694 dev->dev_ops->dev_infos_get(dev, &dev_info);
1695 if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
1698 if (!rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
1699 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
1704 /* RSC global configuration */
1706 if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
1707 (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
1708 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
1713 rfctl = rd32(hw, TXGBE_PSRCTL);
1714 if (rsc_capable && (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
1715 rfctl &= ~TXGBE_PSRCTL_RSCDIA;
1717 rfctl |= TXGBE_PSRCTL_RSCDIA;
1718 wr32(hw, TXGBE_PSRCTL, rfctl);
1720 /* If LRO hasn't been requested - we are done here. */
1721 if (!(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO))
1724 /* Set PSRCTL.RSCACK bit */
1725 rdrxctl = rd32(hw, TXGBE_PSRCTL);
1726 rdrxctl |= TXGBE_PSRCTL_RSCACK;
1727 wr32(hw, TXGBE_PSRCTL, rdrxctl);
1729 /* Per-queue RSC configuration */
1730 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1731 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
1733 rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
1735 rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
1737 rd32(hw, TXGBE_ITR(rxq->reg_idx));
1740 * txgbe PMD doesn't support header-split at the moment.
1742 srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
1743 srrctl |= TXGBE_RXCFG_HDRLEN(128);
1746 * TODO: Consider setting the Receive Descriptor Minimum
1747 * Threshold Size for the RSC case. This is not an obviously
1748 * beneficial option, but one worth considering...
1751 srrctl |= TXGBE_RXCFG_RSCENA;
1752 srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
1753 srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
1754 psrtype |= TXGBE_POOLRSS_L4HDR;
1757 * RSC: Set ITR interval corresponding to 2K ints/s.
1759 * Full-sized RSC aggregations for a 10Gb/s link will
1760 * arrive at about 20K aggregation/s rate.
1762 * A 2K ints/s rate will cause only 10% of the
1763 * aggregations to be closed due to the interrupt timer
1764 * expiration for a streaming at wire-speed case.
1766 * For a sparse streaming case this setting will yield
1767 * at most 500us latency for a single RSC aggregation.
1769 eitr &= ~TXGBE_ITR_IVAL_MASK;
1770 eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
1771 eitr |= TXGBE_ITR_WRDSA;
1773 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
1774 wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
1775 wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
1778 * RSC requires the mapping of the queue to the
1781 txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
1786 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
1792 * Initializes Receive Unit.
1795 txgbe_dev_rx_init(struct rte_eth_dev *dev)
1797 struct txgbe_hw *hw;
1798 struct txgbe_rx_queue *rxq;
1807 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
1810 PMD_INIT_FUNC_TRACE();
1811 hw = TXGBE_DEV_HW(dev);
1814 * Make sure receives are disabled while setting
1815 * up the RX context (registers, descriptor rings, etc.).
1817 wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
1818 wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
1820 /* Enable receipt of broadcast frames */
1821 fctrl = rd32(hw, TXGBE_PSRCTL);
1822 fctrl |= TXGBE_PSRCTL_BCA;
1823 wr32(hw, TXGBE_PSRCTL, fctrl);
1826 * Configure CRC stripping, if any.
1828 hlreg0 = rd32(hw, TXGBE_SECRXCTL);
1829 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1830 hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
1832 hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
1833 wr32(hw, TXGBE_SECRXCTL, hlreg0);
1836 * Configure jumbo frame support, if any.
1838 if (rx_conf->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
1839 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
1840 TXGBE_FRMSZ_MAX(rx_conf->max_rx_pkt_len));
1842 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
1843 TXGBE_FRMSZ_MAX(TXGBE_FRAME_SIZE_DFT));
1847 * If loopback mode is configured, set LPBK bit.
1849 hlreg0 = rd32(hw, TXGBE_PSRCTL);
1850 if (hw->mac.type == txgbe_mac_raptor &&
1851 dev->data->dev_conf.lpbk_mode)
1852 hlreg0 |= TXGBE_PSRCTL_LBENA;
1854 hlreg0 &= ~TXGBE_PSRCTL_LBENA;
1856 wr32(hw, TXGBE_PSRCTL, hlreg0);
1859 * Assume no header split and no VLAN strip support
1860 * on any Rx queue first.
1862 rx_conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
1864 /* Setup RX queues */
1865 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1866 rxq = dev->data->rx_queues[i];
1869 * Reset crc_len in case it was changed after queue setup by a
1870 * call to configure.
1872 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1873 rxq->crc_len = RTE_ETHER_CRC_LEN;
1877 /* Setup the Base and Length of the Rx Descriptor Rings */
1878 bus_addr = rxq->rx_ring_phys_addr;
1879 wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
1880 (uint32_t)(bus_addr & BIT_MASK32));
1881 wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
1882 (uint32_t)(bus_addr >> 32));
1883 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
1884 wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);
1886 srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);
1888 /* Set if packets are dropped when no descriptors available */
1890 srrctl |= TXGBE_RXCFG_DROP;
1893 * Configure the RX buffer size in the PKTLEN field of
1894 * the RXCFG register of the queue.
1895 * The value is in 1 KB resolution. Valid values can be from
1898 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
1899 RTE_PKTMBUF_HEADROOM);
1900 buf_size = ROUND_UP(buf_size, 0x1 << 10);
1901 srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
1903 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
1905 /* Account for two VLAN tags (QinQ) when checking the Rx buffer size */
1906 if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
1907 2 * TXGBE_VLAN_TAG_SIZE > buf_size)
1908 dev->data->scattered_rx = 1;
1909 if (rxq->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
1910 rx_conf->offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
1913 if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
1914 dev->data->scattered_rx = 1;
1917 * Setup the Checksum Register.
1918 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
1919 * Enable IP/L4 checksum computation by hardware if requested to do so.
1921 rxcsum = rd32(hw, TXGBE_PSRCTL);
1922 rxcsum |= TXGBE_PSRCTL_PCSD;
1923 if (rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM)
1924 rxcsum |= TXGBE_PSRCTL_L4CSUM;
1926 rxcsum &= ~TXGBE_PSRCTL_L4CSUM;
1928 wr32(hw, TXGBE_PSRCTL, rxcsum);
1930 if (hw->mac.type == txgbe_mac_raptor) {
1931 rdrxctl = rd32(hw, TXGBE_SECRXCTL);
1932 if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
1933 rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
1935 rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
1936 wr32(hw, TXGBE_SECRXCTL, rdrxctl);
1939 rc = txgbe_set_rsc(dev);
1943 txgbe_set_rx_function(dev);
1949 * Initializes Transmit Unit.
1952 txgbe_dev_tx_init(struct rte_eth_dev *dev)
1954 struct txgbe_hw *hw;
1955 struct txgbe_tx_queue *txq;
1959 PMD_INIT_FUNC_TRACE();
1960 hw = TXGBE_DEV_HW(dev);
1962 /* Setup the Base and Length of the Tx Descriptor Rings */
1963 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1964 txq = dev->data->tx_queues[i];
1966 bus_addr = txq->tx_ring_phys_addr;
1967 wr32(hw, TXGBE_TXBAL(txq->reg_idx),
1968 (uint32_t)(bus_addr & BIT_MASK32));
1969 wr32(hw, TXGBE_TXBAH(txq->reg_idx),
1970 (uint32_t)(bus_addr >> 32));
1971 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
1972 TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
1973 /* Setup the HW Tx Head and TX Tail descriptor pointers */
1974 wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
1975 wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
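/* The helpers below snapshot the per-queue BAL/BAH/CFG registers before a
 * queue is disabled and restore them (with the enable bit cleared) when it
 * is brought back, so a stop/start cycle preserves the ring setup.
 */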
1980 txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
1982 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1983 *(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
1984 *(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
1985 *(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
1989 txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
1991 u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
1992 wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
1993 wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
1994 wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
1998 txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
2000 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
2001 *(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
2002 *(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
2003 *(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
2007 txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
2009 u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
2010 wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
2011 wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
2012 wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
2016 * Start Receive Units for specified queue.
2019 txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2021 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2022 struct txgbe_rx_queue *rxq;
2026 PMD_INIT_FUNC_TRACE();
2028 rxq = dev->data->rx_queues[rx_queue_id];
2030 /* Allocate buffers for descriptor rings */
2031 if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
2032 PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
2036 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2037 rxdctl |= TXGBE_RXCFG_ENA;
2038 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);
2040 /* Wait until RX Enable ready */
2041 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2044 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2045 } while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
2047 PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
2049 wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
2050 wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
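/* The RXWP write above posts all but one of the freshly allocated
 * descriptors to the hardware (the tail must never catch up with the head);
 * the head pointer (RXRP) was reset to 0 just before it.
 */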
2051 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
2057 * Stop Receive Units for specified queue.
2060 txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
2062 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2063 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2064 struct txgbe_rx_queue *rxq;
2068 PMD_INIT_FUNC_TRACE();
2070 rxq = dev->data->rx_queues[rx_queue_id];
2072 txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
2073 wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);
2075 /* Wait until RX Enable bit clear */
2076 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2079 rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
2080 } while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
2082 PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);
2084 rte_delay_us(RTE_TXGBE_WAIT_100_US);
2085 txgbe_dev_store_rx_queue(hw, rxq->reg_idx);
2087 txgbe_rx_queue_release_mbufs(rxq);
2088 txgbe_reset_rx_queue(adapter, rxq);
2089 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
2095 * Start Transmit Units for specified queue.
2098 txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2100 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2101 struct txgbe_tx_queue *txq;
2105 PMD_INIT_FUNC_TRACE();
2107 txq = dev->data->tx_queues[tx_queue_id];
2108 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);
2110 /* Wait until TX Enable ready */
2111 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2114 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
2115 } while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
2117 PMD_INIT_LOG(ERR, "Could not enable "
2118 "Tx Queue %d", tx_queue_id);
2121 wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
2122 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
2128 * Stop Transmit Units for specified queue.
2131 txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
2133 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2134 struct txgbe_tx_queue *txq;
2136 uint32_t txtdh, txtdt;
2139 PMD_INIT_FUNC_TRACE();
2141 txq = dev->data->tx_queues[tx_queue_id];
2143 /* Wait until TX queue is empty */
2144 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2146 rte_delay_us(RTE_TXGBE_WAIT_100_US);
2147 txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
2148 txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
2149 } while (--poll_ms && (txtdh != txtdt));
2152 "Tx Queue %d is not empty when stopping.",
2155 txgbe_dev_save_tx_queue(hw, txq->reg_idx);
2156 wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);
2158 /* Wait until TX Enable bit clear */
2159 poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
2162 txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
2163 } while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
2165 PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
2168 rte_delay_us(RTE_TXGBE_WAIT_100_US);
2169 txgbe_dev_store_tx_queue(hw, txq->reg_idx);
2171 if (txq->ops != NULL) {
2172 txq->ops->release_mbufs(txq);
2173 txq->ops->reset(txq);
2175 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;