1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_cycles.h>
21 #include <rte_debug.h>
22 #include <rte_ethdev.h>
23 #include <ethdev_driver.h>
24 #include <rte_security_driver.h>
25 #include <rte_memzone.h>
26 #include <rte_atomic.h>
27 #include <rte_mempool.h>
28 #include <rte_malloc.h>
30 #include <rte_ether.h>
31 #include <rte_prefetch.h>
35 #include <rte_string_fns.h>
36 #include <rte_errno.h>
40 #include "txgbe_logs.h"
41 #include "base/txgbe.h"
42 #include "txgbe_ethdev.h"
43 #include "txgbe_rxtx.h"
45 #ifdef RTE_LIBRTE_IEEE1588
46 #define TXGBE_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
48 #define TXGBE_TX_IEEE1588_TMST 0
51 /* Bit mask to indicate which bits are required for building the Tx context */
52 static const u64 TXGBE_TX_OFFLOAD_MASK = (RTE_MBUF_F_TX_IP_CKSUM |
53 RTE_MBUF_F_TX_OUTER_IPV6 |
54 RTE_MBUF_F_TX_OUTER_IPV4 |
58 RTE_MBUF_F_TX_L4_MASK |
59 RTE_MBUF_F_TX_TCP_SEG |
60 RTE_MBUF_F_TX_TUNNEL_MASK |
61 RTE_MBUF_F_TX_OUTER_IP_CKSUM |
62 RTE_MBUF_F_TX_OUTER_UDP_CKSUM |
63 #ifdef RTE_LIB_SECURITY
64 RTE_MBUF_F_TX_SEC_OFFLOAD |
66 TXGBE_TX_IEEE1588_TMST);
68 #define TXGBE_TX_OFFLOAD_NOTSUP_MASK \
69 (RTE_MBUF_F_TX_OFFLOAD_MASK ^ TXGBE_TX_OFFLOAD_MASK)
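/*
 * Any offload flag set in an mbuf but not covered by TXGBE_TX_OFFLOAD_MASK
 * lands in this mask; txgbe_prep_pkts() uses it to reject such packets
 * with rte_errno set to ENOTSUP.
 */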
72 * Prefetch a cache line into all cache levels.
74 #define rte_txgbe_prefetch(p) rte_prefetch0(p)
77 txgbe_is_vf(struct rte_eth_dev *dev)
79 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
81 switch (hw->mac.type) {
82 case txgbe_mac_raptor_vf:
89 /*********************************************************************
93 **********************************************************************/
96 * Check for descriptors with their DD bit set and free mbufs.
97 * Return the total number of buffers freed.
99 static __rte_always_inline int
100 txgbe_tx_free_bufs(struct txgbe_tx_queue *txq)
102 struct txgbe_tx_entry *txep;
105 struct rte_mbuf *m, *free[RTE_TXGBE_TX_MAX_FREE_BUF_SZ];
107 /* check DD bit on threshold descriptor */
108 status = txq->tx_ring[txq->tx_next_dd].dw3;
109 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
110 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
111 txgbe_set32_masked(txq->tdc_reg_addr,
112 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
117 * first buffer to free from S/W ring is at index
118 * tx_next_dd - (tx_free_thresh-1)
120 txep = &txq->sw_ring[txq->tx_next_dd - (txq->tx_free_thresh - 1)];
121 for (i = 0; i < txq->tx_free_thresh; ++i, ++txep) {
122 /* free buffers one at a time */
123 m = rte_pktmbuf_prefree_seg(txep->mbuf);
126 if (unlikely(m == NULL))
129 if (nb_free >= RTE_TXGBE_TX_MAX_FREE_BUF_SZ ||
130 (nb_free > 0 && m->pool != free[0]->pool)) {
131 rte_mempool_put_bulk(free[0]->pool,
132 (void **)free, nb_free);
140 rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
142 /* buffers were freed, update counters */
143 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_free_thresh);
144 txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_free_thresh);
145 if (txq->tx_next_dd >= txq->nb_tx_desc)
146 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
148 return txq->tx_free_thresh;
151 /* Populate 4 descriptors with data from 4 mbufs */
153 tx4(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
155 uint64_t buf_dma_addr;
159 for (i = 0; i < 4; ++i, ++txdp, ++pkts) {
160 buf_dma_addr = rte_mbuf_data_iova(*pkts);
161 pkt_len = (*pkts)->data_len;
163 /* write data to descriptor */
164 txdp->qw0 = rte_cpu_to_le_64(buf_dma_addr);
165 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
166 TXGBE_TXD_DATLEN(pkt_len));
167 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
169 rte_prefetch0(&(*pkts)->pool);
173 /* Populate 1 descriptor with data from 1 mbuf */
175 tx1(volatile struct txgbe_tx_desc *txdp, struct rte_mbuf **pkts)
177 uint64_t buf_dma_addr;
180 buf_dma_addr = rte_mbuf_data_iova(*pkts);
181 pkt_len = (*pkts)->data_len;
183 /* write data to descriptor */
184 txdp->qw0 = cpu_to_le64(buf_dma_addr);
185 txdp->dw2 = cpu_to_le32(TXGBE_TXD_FLAGS |
186 TXGBE_TXD_DATLEN(pkt_len));
187 txdp->dw3 = cpu_to_le32(TXGBE_TXD_PAYLEN(pkt_len));
189 rte_prefetch0(&(*pkts)->pool);
193 * Fill H/W descriptor ring with mbuf data.
194 * Copy mbuf pointers to the S/W ring.
197 txgbe_tx_fill_hw_ring(struct txgbe_tx_queue *txq, struct rte_mbuf **pkts,
200 volatile struct txgbe_tx_desc *txdp = &txq->tx_ring[txq->tx_tail];
201 struct txgbe_tx_entry *txep = &txq->sw_ring[txq->tx_tail];
202 const int N_PER_LOOP = 4;
203 const int N_PER_LOOP_MASK = N_PER_LOOP - 1;
204 int mainpart, leftover;
208 * Process most of the packets in chunks of N pkts. Any
209 * leftover packets will get processed one at a time.
211 mainpart = (nb_pkts & ((uint32_t)~N_PER_LOOP_MASK));
212 leftover = (nb_pkts & ((uint32_t)N_PER_LOOP_MASK));
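/*
 * Example: with nb_pkts = 13 and N_PER_LOOP = 4, mainpart = 12
 * (filled four at a time by tx4()) and leftover = 1 (filled by tx1()).
 */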
213 for (i = 0; i < mainpart; i += N_PER_LOOP) {
214 /* Copy N mbuf pointers to the S/W ring */
215 for (j = 0; j < N_PER_LOOP; ++j)
216 (txep + i + j)->mbuf = *(pkts + i + j);
217 tx4(txdp + i, pkts + i);
220 if (unlikely(leftover > 0)) {
221 for (i = 0; i < leftover; ++i) {
222 (txep + mainpart + i)->mbuf = *(pkts + mainpart + i);
223 tx1(txdp + mainpart + i, pkts + mainpart + i);
228 static inline uint16_t
229 tx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
232 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
236 * Begin scanning the H/W ring for done descriptors when the
237 * number of available descriptors drops below tx_free_thresh. For
238 * each done descriptor, free the associated buffer.
240 if (txq->nb_tx_free < txq->tx_free_thresh)
241 txgbe_tx_free_bufs(txq);
243 /* Only use descriptors that are available */
244 nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
245 if (unlikely(nb_pkts == 0))
248 /* Use exactly nb_pkts descriptors */
249 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
252 * At this point, we know there are enough descriptors in the
253 * ring to transmit all the packets. This assumes that each
254 * mbuf contains a single segment, and that no new offloads
255 * are expected, which would require a new context descriptor.
259 * See if we're going to wrap-around. If so, handle the top
260 * of the descriptor ring first, then do the bottom. If not,
261 * the processing looks just like the "bottom" part anyway...
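 * Example: with nb_tx_desc = 512, tx_tail = 510 and nb_pkts = 5,
 * n = 2 packets fill the top of the ring and the remaining 3
 * continue from descriptor 0.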
263 if ((txq->tx_tail + nb_pkts) > txq->nb_tx_desc) {
264 n = (uint16_t)(txq->nb_tx_desc - txq->tx_tail);
265 txgbe_tx_fill_hw_ring(txq, tx_pkts, n);
269 /* Fill H/W descriptor ring with mbuf data */
270 txgbe_tx_fill_hw_ring(txq, tx_pkts + n, (uint16_t)(nb_pkts - n));
271 txq->tx_tail = (uint16_t)(txq->tx_tail + (nb_pkts - n));
274 * Check for wrap-around. This would only happen if we used
275 * up to the last descriptor in the ring, no more, no less.
277 if (txq->tx_tail >= txq->nb_tx_desc)
280 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
281 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
282 (uint16_t)txq->tx_tail, (uint16_t)nb_pkts);
284 /* update tail pointer */
286 txgbe_set32_relaxed(txq->tdt_reg_addr, txq->tx_tail);
292 txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
297 /* Try to transmit at least chunks of TX_MAX_BURST pkts */
298 if (likely(nb_pkts <= RTE_PMD_TXGBE_TX_MAX_BURST))
299 return tx_xmit_pkts(tx_queue, tx_pkts, nb_pkts);
301 /* transmit more than the max burst, in chunks of TX_MAX_BURST */
306 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_TX_MAX_BURST);
307 ret = tx_xmit_pkts(tx_queue, &tx_pkts[nb_tx], n);
308 nb_tx = (uint16_t)(nb_tx + ret);
309 nb_pkts = (uint16_t)(nb_pkts - ret);
318 txgbe_set_xmit_ctx(struct txgbe_tx_queue *txq,
319 volatile struct txgbe_tx_ctx_desc *ctx_txd,
320 uint64_t ol_flags, union txgbe_tx_offload tx_offload,
321 __rte_unused uint64_t *mdata)
323 union txgbe_tx_offload tx_offload_mask;
324 uint32_t type_tucmd_mlhl;
325 uint32_t mss_l4len_idx;
327 uint32_t vlan_macip_lens;
328 uint32_t tunnel_seed;
330 ctx_idx = txq->ctx_curr;
331 tx_offload_mask.data[0] = 0;
332 tx_offload_mask.data[1] = 0;
334 /* Specify which HW CTX to upload. */
335 mss_l4len_idx = TXGBE_TXD_IDX(ctx_idx);
336 type_tucmd_mlhl = TXGBE_TXD_CTXT;
338 tx_offload_mask.ptid |= ~0;
339 type_tucmd_mlhl |= TXGBE_TXD_PTID(tx_offload.ptid);
341 /* check if TCP segmentation required for this packet */
342 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
343 tx_offload_mask.l2_len |= ~0;
344 tx_offload_mask.l3_len |= ~0;
345 tx_offload_mask.l4_len |= ~0;
346 tx_offload_mask.tso_segsz |= ~0;
347 mss_l4len_idx |= TXGBE_TXD_MSS(tx_offload.tso_segsz);
348 mss_l4len_idx |= TXGBE_TXD_L4LEN(tx_offload.l4_len);
349 } else { /* no TSO, check if hardware checksum is needed */
350 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
351 tx_offload_mask.l2_len |= ~0;
352 tx_offload_mask.l3_len |= ~0;
355 switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
356 case RTE_MBUF_F_TX_UDP_CKSUM:
358 TXGBE_TXD_L4LEN(sizeof(struct rte_udp_hdr));
359 tx_offload_mask.l2_len |= ~0;
360 tx_offload_mask.l3_len |= ~0;
362 case RTE_MBUF_F_TX_TCP_CKSUM:
364 TXGBE_TXD_L4LEN(sizeof(struct rte_tcp_hdr));
365 tx_offload_mask.l2_len |= ~0;
366 tx_offload_mask.l3_len |= ~0;
368 case RTE_MBUF_F_TX_SCTP_CKSUM:
370 TXGBE_TXD_L4LEN(sizeof(struct rte_sctp_hdr));
371 tx_offload_mask.l2_len |= ~0;
372 tx_offload_mask.l3_len |= ~0;
379 vlan_macip_lens = TXGBE_TXD_IPLEN(tx_offload.l3_len >> 1);
381 if (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
382 tx_offload_mask.outer_tun_len |= ~0;
383 tx_offload_mask.outer_l2_len |= ~0;
384 tx_offload_mask.outer_l3_len |= ~0;
385 tx_offload_mask.l2_len |= ~0;
386 tunnel_seed = TXGBE_TXD_ETUNLEN(tx_offload.outer_tun_len >> 1);
387 tunnel_seed |= TXGBE_TXD_EIPLEN(tx_offload.outer_l3_len >> 2);
389 switch (ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
390 case RTE_MBUF_F_TX_TUNNEL_IPIP:
391 /* for non-UDP / non-GRE tunneling, set to 0b */
393 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
394 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
395 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
396 tunnel_seed |= TXGBE_TXD_ETYPE_UDP;
398 case RTE_MBUF_F_TX_TUNNEL_GRE:
399 tunnel_seed |= TXGBE_TXD_ETYPE_GRE;
402 PMD_TX_LOG(ERR, "Tunnel type not supported");
405 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.outer_l2_len);
408 vlan_macip_lens |= TXGBE_TXD_MACLEN(tx_offload.l2_len);
411 if (ol_flags & RTE_MBUF_F_TX_VLAN) {
412 tx_offload_mask.vlan_tci |= ~0;
413 vlan_macip_lens |= TXGBE_TXD_VLAN(tx_offload.vlan_tci);
416 #ifdef RTE_LIB_SECURITY
417 if (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD) {
418 union txgbe_crypto_tx_desc_md *md =
419 (union txgbe_crypto_tx_desc_md *)mdata;
420 tunnel_seed |= TXGBE_TXD_IPSEC_SAIDX(md->sa_idx);
421 type_tucmd_mlhl |= md->enc ?
422 (TXGBE_TXD_IPSEC_ESP | TXGBE_TXD_IPSEC_ESPENC) : 0;
423 type_tucmd_mlhl |= TXGBE_TXD_IPSEC_ESPLEN(md->pad_len);
424 tx_offload_mask.sa_idx |= ~0;
425 tx_offload_mask.sec_pad_len |= ~0;
429 txq->ctx_cache[ctx_idx].flags = ol_flags;
430 txq->ctx_cache[ctx_idx].tx_offload.data[0] =
431 tx_offload_mask.data[0] & tx_offload.data[0];
432 txq->ctx_cache[ctx_idx].tx_offload.data[1] =
433 tx_offload_mask.data[1] & tx_offload.data[1];
434 txq->ctx_cache[ctx_idx].tx_offload_mask = tx_offload_mask;
436 ctx_txd->dw0 = rte_cpu_to_le_32(vlan_macip_lens);
437 ctx_txd->dw1 = rte_cpu_to_le_32(tunnel_seed);
438 ctx_txd->dw2 = rte_cpu_to_le_32(type_tucmd_mlhl);
439 ctx_txd->dw3 = rte_cpu_to_le_32(mss_l4len_idx);
443 * Check which hardware context can be used. Use the existing match
444 * or create a new context descriptor.
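 *
 * The queue keeps a cache of TXGBE_CTX_NUM context entries (flags plus
 * masked offload data). Returning TXGBE_CTX_NUM means no cached entry
 * matches and the caller must build a new context descriptor.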
446 static inline uint32_t
447 what_ctx_update(struct txgbe_tx_queue *txq, uint64_t flags,
448 union txgbe_tx_offload tx_offload)
450 /* If it matches the currently used context */
451 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
452 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
453 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
454 & tx_offload.data[0])) &&
455 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
456 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
457 & tx_offload.data[1]))))
458 return txq->ctx_curr;
460 /* Otherwise, check whether the next context entry matches */
462 if (likely(txq->ctx_cache[txq->ctx_curr].flags == flags &&
463 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] ==
464 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0]
465 & tx_offload.data[0])) &&
466 (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] ==
467 (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1]
468 & tx_offload.data[1]))))
469 return txq->ctx_curr;
471 /* Mismatch: a new context descriptor must be built */
472 return TXGBE_CTX_NUM;
475 static inline uint32_t
476 tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags)
480 if ((ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM) {
482 tmp |= TXGBE_TXD_L4CS;
484 if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
486 tmp |= TXGBE_TXD_IPCS;
488 if (ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) {
490 tmp |= TXGBE_TXD_EIPCS;
492 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
494 /* implies IPv4 cksum */
495 if (ol_flags & RTE_MBUF_F_TX_IPV4)
496 tmp |= TXGBE_TXD_IPCS;
497 tmp |= TXGBE_TXD_L4CS;
499 if (ol_flags & RTE_MBUF_F_TX_VLAN)
505 static inline uint32_t
506 tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags)
508 uint32_t cmdtype = 0;
510 if (ol_flags & RTE_MBUF_F_TX_VLAN)
511 cmdtype |= TXGBE_TXD_VLE;
512 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
513 cmdtype |= TXGBE_TXD_TSE;
514 if (ol_flags & RTE_MBUF_F_TX_MACSEC)
515 cmdtype |= TXGBE_TXD_LINKSEC;
519 static inline uint8_t
520 tx_desc_ol_flags_to_ptid(uint64_t oflags, uint32_t ptype)
525 return txgbe_encode_ptype(ptype);
527 /* Only support flags in TXGBE_TX_OFFLOAD_MASK */
528 tun = !!(oflags & RTE_MBUF_F_TX_TUNNEL_MASK);
531 ptype = RTE_PTYPE_L2_ETHER;
532 if (oflags & RTE_MBUF_F_TX_VLAN)
533 ptype |= RTE_PTYPE_L2_ETHER_VLAN;
536 if (oflags & (RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM))
537 ptype |= RTE_PTYPE_L3_IPV4;
538 else if (oflags & (RTE_MBUF_F_TX_OUTER_IPV6))
539 ptype |= RTE_PTYPE_L3_IPV6;
541 if (oflags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM))
542 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4);
543 else if (oflags & (RTE_MBUF_F_TX_IPV6))
544 ptype |= (tun ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6);
547 switch (oflags & (RTE_MBUF_F_TX_L4_MASK)) {
548 case RTE_MBUF_F_TX_TCP_CKSUM:
549 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
551 case RTE_MBUF_F_TX_UDP_CKSUM:
552 ptype |= (tun ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP);
554 case RTE_MBUF_F_TX_SCTP_CKSUM:
555 ptype |= (tun ? RTE_PTYPE_INNER_L4_SCTP : RTE_PTYPE_L4_SCTP);
559 if (oflags & RTE_MBUF_F_TX_TCP_SEG)
560 ptype |= (tun ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP);
563 switch (oflags & RTE_MBUF_F_TX_TUNNEL_MASK) {
564 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
565 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
566 ptype |= RTE_PTYPE_L2_ETHER |
568 RTE_PTYPE_TUNNEL_GRENAT;
570 case RTE_MBUF_F_TX_TUNNEL_GRE:
571 ptype |= RTE_PTYPE_L2_ETHER |
573 RTE_PTYPE_TUNNEL_GRE;
574 ptype |= RTE_PTYPE_INNER_L2_ETHER;
576 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
577 ptype |= RTE_PTYPE_L2_ETHER |
579 RTE_PTYPE_TUNNEL_GENEVE;
580 ptype |= RTE_PTYPE_INNER_L2_ETHER;
582 case RTE_MBUF_F_TX_TUNNEL_IPIP:
583 case RTE_MBUF_F_TX_TUNNEL_IP:
584 ptype |= RTE_PTYPE_L2_ETHER |
590 return txgbe_encode_ptype(ptype);
593 #ifndef DEFAULT_TX_FREE_THRESH
594 #define DEFAULT_TX_FREE_THRESH 32
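/*
 * Fallback used by txgbe_dev_tx_queue_setup() when the application
 * passes tx_conf->tx_free_thresh == 0.
 */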
597 /* Reset transmit descriptors after they have been used */
599 txgbe_xmit_cleanup(struct txgbe_tx_queue *txq)
601 struct txgbe_tx_entry *sw_ring = txq->sw_ring;
602 volatile struct txgbe_tx_desc *txr = txq->tx_ring;
603 uint16_t last_desc_cleaned = txq->last_desc_cleaned;
604 uint16_t nb_tx_desc = txq->nb_tx_desc;
605 uint16_t desc_to_clean_to;
606 uint16_t nb_tx_to_clean;
609 /* Determine the last descriptor needing to be cleaned */
610 desc_to_clean_to = (uint16_t)(last_desc_cleaned + txq->tx_free_thresh);
611 if (desc_to_clean_to >= nb_tx_desc)
612 desc_to_clean_to = (uint16_t)(desc_to_clean_to - nb_tx_desc);
614 /* Check to make sure the last descriptor to clean is done */
615 desc_to_clean_to = sw_ring[desc_to_clean_to].last_id;
616 status = txr[desc_to_clean_to].dw3;
617 if (!(status & rte_cpu_to_le_32(TXGBE_TXD_DD))) {
618 PMD_TX_FREE_LOG(DEBUG,
619 "TX descriptor %4u is not done"
620 "(port=%d queue=%d)",
622 txq->port_id, txq->queue_id);
623 if (txq->nb_tx_free >> 1 < txq->tx_free_thresh)
624 txgbe_set32_masked(txq->tdc_reg_addr,
625 TXGBE_TXCFG_FLUSH, TXGBE_TXCFG_FLUSH);
626 /* Failed to clean any descriptors, better luck next time */
630 /* Figure out how many descriptors will be cleaned */
631 if (last_desc_cleaned > desc_to_clean_to)
632 nb_tx_to_clean = (uint16_t)((nb_tx_desc - last_desc_cleaned) +
635 nb_tx_to_clean = (uint16_t)(desc_to_clean_to -
638 PMD_TX_FREE_LOG(DEBUG,
639 "Cleaning %4u TX descriptors: %4u to %4u "
640 "(port=%d queue=%d)",
641 nb_tx_to_clean, last_desc_cleaned, desc_to_clean_to,
642 txq->port_id, txq->queue_id);
645 * The last descriptor to clean is done, so that means all the
646 * descriptors from the last descriptor that was cleaned
647 * up to the last descriptor with the RS bit set
648 * are done. Only reset the threshold descriptor.
650 txr[desc_to_clean_to].dw3 = 0;
652 /* Update the txq to reflect the last descriptor that was cleaned */
653 txq->last_desc_cleaned = desc_to_clean_to;
654 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + nb_tx_to_clean);
660 static inline uint8_t
661 txgbe_get_tun_len(struct rte_mbuf *mbuf)
663 struct txgbe_genevehdr genevehdr;
664 const struct txgbe_genevehdr *gh;
667 switch (mbuf->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK) {
668 case RTE_MBUF_F_TX_TUNNEL_IPIP:
671 case RTE_MBUF_F_TX_TUNNEL_VXLAN:
672 case RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE:
673 tun_len = sizeof(struct txgbe_udphdr)
674 + sizeof(struct txgbe_vxlanhdr);
676 case RTE_MBUF_F_TX_TUNNEL_GRE:
677 tun_len = sizeof(struct txgbe_nvgrehdr);
679 case RTE_MBUF_F_TX_TUNNEL_GENEVE:
680 gh = rte_pktmbuf_read(mbuf,
681 mbuf->outer_l2_len + mbuf->outer_l3_len,
682 sizeof(genevehdr), &genevehdr);
683 tun_len = sizeof(struct txgbe_udphdr)
684 + sizeof(struct txgbe_genevehdr)
685 + (gh->opt_len << 2);
694 static inline uint8_t
695 txgbe_parse_tun_ptid(struct rte_mbuf *tx_pkt)
697 uint64_t l2_none, l2_mac, l2_mac_vlan;
700 if ((tx_pkt->ol_flags & (RTE_MBUF_F_TX_TUNNEL_VXLAN |
701 RTE_MBUF_F_TX_TUNNEL_VXLAN_GPE)) == 0)
704 l2_none = sizeof(struct txgbe_udphdr) + sizeof(struct txgbe_vxlanhdr);
705 l2_mac = l2_none + sizeof(struct rte_ether_hdr);
706 l2_mac_vlan = l2_mac + sizeof(struct rte_vlan_hdr);
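/*
 * tx_pkt->l2_len here is expected to cover the UDP + VXLAN headers
 * plus any inner MAC/VLAN header; the three possible totals map to
 * the EIG, EIGM and EIGMV tunnel packet types below.
 */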
708 if (tx_pkt->l2_len == l2_none)
709 ptid = TXGBE_PTID_TUN_EIG;
710 else if (tx_pkt->l2_len == l2_mac)
711 ptid = TXGBE_PTID_TUN_EIGM;
712 else if (tx_pkt->l2_len == l2_mac_vlan)
713 ptid = TXGBE_PTID_TUN_EIGMV;
719 txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
722 struct txgbe_tx_queue *txq;
723 struct txgbe_tx_entry *sw_ring;
724 struct txgbe_tx_entry *txe, *txn;
725 volatile struct txgbe_tx_desc *txr;
726 volatile struct txgbe_tx_desc *txd;
727 struct rte_mbuf *tx_pkt;
728 struct rte_mbuf *m_seg;
729 uint64_t buf_dma_addr;
730 uint32_t olinfo_status;
731 uint32_t cmd_type_len;
742 union txgbe_tx_offload tx_offload;
743 #ifdef RTE_LIB_SECURITY
747 tx_offload.data[0] = 0;
748 tx_offload.data[1] = 0;
750 sw_ring = txq->sw_ring;
752 tx_id = txq->tx_tail;
753 txe = &sw_ring[tx_id];
755 /* Determine if the descriptor ring needs to be cleaned. */
756 if (txq->nb_tx_free < txq->tx_free_thresh)
757 txgbe_xmit_cleanup(txq);
759 rte_prefetch0(&txe->mbuf->pool);
762 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
765 pkt_len = tx_pkt->pkt_len;
768 * Determine how many (if any) context descriptors
769 * are needed for offload functionality.
771 ol_flags = tx_pkt->ol_flags;
772 #ifdef RTE_LIB_SECURITY
773 use_ipsec = txq->using_ipsec && (ol_flags & RTE_MBUF_F_TX_SEC_OFFLOAD);
776 /* If hardware offload required */
777 tx_ol_req = ol_flags & TXGBE_TX_OFFLOAD_MASK;
779 tx_offload.ptid = tx_desc_ol_flags_to_ptid(tx_ol_req,
780 tx_pkt->packet_type);
781 if (tx_offload.ptid & TXGBE_PTID_PKT_TUN)
782 tx_offload.ptid |= txgbe_parse_tun_ptid(tx_pkt);
783 tx_offload.l2_len = tx_pkt->l2_len;
784 tx_offload.l3_len = tx_pkt->l3_len;
785 tx_offload.l4_len = tx_pkt->l4_len;
786 tx_offload.vlan_tci = tx_pkt->vlan_tci;
787 tx_offload.tso_segsz = tx_pkt->tso_segsz;
788 tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
789 tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
790 tx_offload.outer_tun_len = txgbe_get_tun_len(tx_pkt);
792 #ifdef RTE_LIB_SECURITY
794 union txgbe_crypto_tx_desc_md *ipsec_mdata =
795 (union txgbe_crypto_tx_desc_md *)
796 rte_security_dynfield(tx_pkt);
797 tx_offload.sa_idx = ipsec_mdata->sa_idx;
798 tx_offload.sec_pad_len = ipsec_mdata->pad_len;
802 /* Decide whether a new context must be built or an existing one can be reused */
803 ctx = what_ctx_update(txq, tx_ol_req, tx_offload);
804 /* Only allocate context descriptor if required */
805 new_ctx = (ctx == TXGBE_CTX_NUM);
810 * Keep track of how many descriptors are used in this loop.
811 * This will always be the number of segments + the number of
812 * context descriptors required to transmit the packet.
814 nb_used = (uint16_t)(tx_pkt->nb_segs + new_ctx);
817 * The number of descriptors that must be allocated for a
818 * packet is the number of segments of that packet, plus 1
819 * Context Descriptor for the hardware offload, if any.
820 * Determine the last TX descriptor to allocate in the TX ring
821 * for the packet, starting from the current position (tx_id)
824 tx_last = (uint16_t)(tx_id + nb_used - 1);
827 if (tx_last >= txq->nb_tx_desc)
828 tx_last = (uint16_t)(tx_last - txq->nb_tx_desc);
830 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u pktlen=%u"
831 " tx_first=%u tx_last=%u",
832 (uint16_t)txq->port_id,
833 (uint16_t)txq->queue_id,
839 * Make sure there are enough TX descriptors available to
840 * transmit the entire packet.
841 * nb_used better be less than or equal to txq->tx_free_thresh
843 if (nb_used > txq->nb_tx_free) {
844 PMD_TX_FREE_LOG(DEBUG,
845 "Not enough free TX descriptors "
846 "nb_used=%4u nb_free=%4u "
847 "(port=%d queue=%d)",
848 nb_used, txq->nb_tx_free,
849 txq->port_id, txq->queue_id);
851 if (txgbe_xmit_cleanup(txq) != 0) {
852 /* Could not clean any descriptors */
858 /* nb_used better be <= txq->tx_free_thresh */
859 if (unlikely(nb_used > txq->tx_free_thresh)) {
860 PMD_TX_FREE_LOG(DEBUG,
861 "The number of descriptors needed to "
862 "transmit the packet exceeds the "
863 "RS bit threshold. This will impact "
865 "nb_used=%4u nb_free=%4u "
866 "tx_free_thresh=%4u. "
867 "(port=%d queue=%d)",
868 nb_used, txq->nb_tx_free,
870 txq->port_id, txq->queue_id);
872 * Loop here until there are enough TX
873 * descriptors or until the ring cannot be
876 while (nb_used > txq->nb_tx_free) {
877 if (txgbe_xmit_cleanup(txq) != 0) {
879 * Could not clean any
891 * By now there are enough free TX descriptors to transmit
896 * Set common flags of all TX Data Descriptors.
898 * The following bits must be set in all Data Descriptors:
899 * - TXGBE_TXD_DTYP_DATA
900 * - TXGBE_TXD_DCMD_DEXT
902 * The following bits must be set in the first Data Descriptor
903 * and are ignored in the other ones:
904 * - TXGBE_TXD_DCMD_IFCS
905 * - TXGBE_TXD_MAC_1588
906 * - TXGBE_TXD_DCMD_VLE
908 * The following bits must only be set in the last Data
910 * - TXGBE_TXD_CMD_EOP
912 * The following bits can be set in any Data Descriptor, but
913 * are only set in the last Data Descriptor:
916 cmd_type_len = TXGBE_TXD_FCS;
918 #ifdef RTE_LIBRTE_IEEE1588
919 if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
920 cmd_type_len |= TXGBE_TXD_1588;
925 if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
926 /* when TSO is on, the paylen in the descriptor is
927 * not the packet length but the TCP payload length
929 pkt_len -= (tx_offload.l2_len +
930 tx_offload.l3_len + tx_offload.l4_len);
932 (tx_pkt->ol_flags & RTE_MBUF_F_TX_TUNNEL_MASK)
933 ? tx_offload.outer_l2_len +
934 tx_offload.outer_l3_len : 0;
938 * Setup the TX Advanced Context Descriptor if required
941 volatile struct txgbe_tx_ctx_desc *ctx_txd;
943 ctx_txd = (volatile struct txgbe_tx_ctx_desc *)
946 txn = &sw_ring[txe->next_id];
947 rte_prefetch0(&txn->mbuf->pool);
949 if (txe->mbuf != NULL) {
950 rte_pktmbuf_free_seg(txe->mbuf);
954 txgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
956 rte_security_dynfield(tx_pkt));
958 txe->last_id = tx_last;
959 tx_id = txe->next_id;
964 * Setup the TX Advanced Data Descriptor.
965 * This path is taken whether a new context descriptor
966 * was built or an existing one is being reused.
968 cmd_type_len |= tx_desc_ol_flags_to_cmdtype(ol_flags);
970 tx_desc_cksum_flags_to_olinfo(ol_flags);
971 olinfo_status |= TXGBE_TXD_IDX(ctx);
974 olinfo_status |= TXGBE_TXD_PAYLEN(pkt_len);
975 #ifdef RTE_LIB_SECURITY
977 olinfo_status |= TXGBE_TXD_IPSEC;
983 txn = &sw_ring[txe->next_id];
984 rte_prefetch0(&txn->mbuf->pool);
986 if (txe->mbuf != NULL)
987 rte_pktmbuf_free_seg(txe->mbuf);
991 * Set up Transmit Data Descriptor.
993 slen = m_seg->data_len;
994 buf_dma_addr = rte_mbuf_data_iova(m_seg);
995 txd->qw0 = rte_cpu_to_le_64(buf_dma_addr);
996 txd->dw2 = rte_cpu_to_le_32(cmd_type_len | slen);
997 txd->dw3 = rte_cpu_to_le_32(olinfo_status);
998 txe->last_id = tx_last;
999 tx_id = txe->next_id;
1001 m_seg = m_seg->next;
1002 } while (m_seg != NULL);
1005 * The last packet data descriptor needs End Of Packet (EOP)
1007 cmd_type_len |= TXGBE_TXD_EOP;
1008 txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_used);
1010 txd->dw2 |= rte_cpu_to_le_32(cmd_type_len);
1018 * Set the Transmit Descriptor Tail (TDT)
1020 PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
1021 (uint16_t)txq->port_id, (uint16_t)txq->queue_id,
1022 (uint16_t)tx_id, (uint16_t)nb_tx);
1023 txgbe_set32_relaxed(txq->tdt_reg_addr, tx_id);
1024 txq->tx_tail = tx_id;
1029 /*********************************************************************
1033 **********************************************************************/
1035 txgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1040 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
1042 for (i = 0; i < nb_pkts; i++) {
1044 ol_flags = m->ol_flags;
1047 * Check if packet meets requirements for number of segments
1049 * NOTE: for txgbe it's always (40 - WTHRESH) for both TSO and
1053 if (m->nb_segs > TXGBE_TX_MAX_SEG - txq->wthresh) {
1054 rte_errno = EINVAL;
1058 if (ol_flags & TXGBE_TX_OFFLOAD_NOTSUP_MASK) {
1059 rte_errno = ENOTSUP;
1063 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1064 ret = rte_validate_tx_offload(m);
1070 ret = rte_net_intel_cksum_prepare(m);
1080 /*********************************************************************
1084 **********************************************************************/
1085 /* @note: fix txgbe_dev_supported_ptypes_get() if anything changes here. */
1086 static inline uint32_t
1087 txgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptid_mask)
1089 uint16_t ptid = TXGBE_RXD_PTID(pkt_info);
1093 return txgbe_decode_ptype(ptid);
1096 static inline uint64_t
1097 txgbe_rxd_pkt_info_to_pkt_flags(uint32_t pkt_info)
1099 static uint64_t ip_rss_types_map[16] __rte_cache_aligned = {
1100 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH,
1101 0, RTE_MBUF_F_RX_RSS_HASH, 0, RTE_MBUF_F_RX_RSS_HASH,
1102 RTE_MBUF_F_RX_RSS_HASH, 0, 0, 0,
1103 0, 0, 0, RTE_MBUF_F_RX_FDIR,
1105 #ifdef RTE_LIBRTE_IEEE1588
1106 static uint64_t ip_pkt_etqf_map[8] = {
1107 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
1110 int etfid = txgbe_etflt_id(TXGBE_RXD_PTID(pkt_info));
1111 if (likely(-1 != etfid))
1112 return ip_pkt_etqf_map[etfid] |
1113 ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1115 return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1117 return ip_rss_types_map[TXGBE_RXD_RSSTYPE(pkt_info)];
1121 static inline uint64_t
1122 rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags)
1127 * Check if VLAN present only.
1128 * Do not check whether the L3/L4 Rx checksum was done by the NIC;
1129 * that can be found from the rte_eth_rxmode.offloads flag.
1131 pkt_flags = (rx_status & TXGBE_RXD_STAT_VLAN &&
1132 vlan_flags & RTE_MBUF_F_RX_VLAN_STRIPPED)
1135 #ifdef RTE_LIBRTE_IEEE1588
1136 if (rx_status & TXGBE_RXD_STAT_1588)
1137 pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
1142 static inline uint64_t
1143 rx_desc_error_to_pkt_flags(uint32_t rx_status)
1145 uint64_t pkt_flags = 0;
1147 /* checksum offload can't be disabled */
1148 if (rx_status & TXGBE_RXD_STAT_IPCS) {
1149 pkt_flags |= (rx_status & TXGBE_RXD_ERR_IPCS
1150 ? RTE_MBUF_F_RX_IP_CKSUM_BAD : RTE_MBUF_F_RX_IP_CKSUM_GOOD);
1153 if (rx_status & TXGBE_RXD_STAT_L4CS) {
1154 pkt_flags |= (rx_status & TXGBE_RXD_ERR_L4CS
1155 ? RTE_MBUF_F_RX_L4_CKSUM_BAD : RTE_MBUF_F_RX_L4_CKSUM_GOOD);
1158 if (rx_status & TXGBE_RXD_STAT_EIPCS &&
1159 rx_status & TXGBE_RXD_ERR_EIPCS) {
1160 pkt_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
1163 #ifdef RTE_LIB_SECURITY
1164 if (rx_status & TXGBE_RXD_STAT_SECP) {
1165 pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD;
1166 if (rx_status & TXGBE_RXD_ERR_SECERR)
1167 pkt_flags |= RTE_MBUF_F_RX_SEC_OFFLOAD_FAILED;
1175 * LOOK_AHEAD defines how many desc statuses to check beyond the
1176 * current descriptor.
1177 * It must be a pound define for optimal performance.
1178 * Do not change the value of LOOK_AHEAD, as the txgbe_rx_scan_hw_ring
1179 * function only works with LOOK_AHEAD=8.
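 *
 * txgbe_rx_scan_hw_ring() walks up to RTE_PMD_TXGBE_RX_MAX_BURST
 * descriptors in groups of LOOK_AHEAD, reading the status words first
 * and only translating the descriptors whose DD bit is already set.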
1181 #define LOOK_AHEAD 8
1182 #if (LOOK_AHEAD != 8)
1183 #error "PMD TXGBE: LOOK_AHEAD must be 8\n"
1186 txgbe_rx_scan_hw_ring(struct txgbe_rx_queue *rxq)
1188 volatile struct txgbe_rx_desc *rxdp;
1189 struct txgbe_rx_entry *rxep;
1190 struct rte_mbuf *mb;
1194 uint32_t s[LOOK_AHEAD];
1195 uint32_t pkt_info[LOOK_AHEAD];
1196 int i, j, nb_rx = 0;
1199 /* get references to current descriptor and S/W ring entry */
1200 rxdp = &rxq->rx_ring[rxq->rx_tail];
1201 rxep = &rxq->sw_ring[rxq->rx_tail];
1203 status = rxdp->qw1.lo.status;
1204 /* check to make sure there is at least 1 packet to receive */
1205 if (!(status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1209 * Scan LOOK_AHEAD descriptors at a time to determine which descriptors
1210 * reference packets that are ready to be received.
1212 for (i = 0; i < RTE_PMD_TXGBE_RX_MAX_BURST;
1213 i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) {
1214 /* Read desc statuses backwards to avoid race condition */
1215 for (j = 0; j < LOOK_AHEAD; j++)
1216 s[j] = rte_le_to_cpu_32(rxdp[j].qw1.lo.status);
1218 rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
1220 /* Compute how many status bits were set */
1221 for (nb_dd = 0; nb_dd < LOOK_AHEAD &&
1222 (s[nb_dd] & TXGBE_RXD_STAT_DD); nb_dd++)
1225 for (j = 0; j < nb_dd; j++)
1226 pkt_info[j] = rte_le_to_cpu_32(rxdp[j].qw0.dw0);
1230 /* Translate descriptor info to mbuf format */
1231 for (j = 0; j < nb_dd; ++j) {
1233 pkt_len = rte_le_to_cpu_16(rxdp[j].qw1.hi.len) -
1235 mb->data_len = pkt_len;
1236 mb->pkt_len = pkt_len;
1237 mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].qw1.hi.tag);
1239 /* convert descriptor fields to rte mbuf flags */
1240 pkt_flags = rx_desc_status_to_pkt_flags(s[j],
1242 pkt_flags |= rx_desc_error_to_pkt_flags(s[j]);
1244 txgbe_rxd_pkt_info_to_pkt_flags(pkt_info[j]);
1245 mb->ol_flags = pkt_flags;
1247 txgbe_rxd_pkt_info_to_pkt_type(pkt_info[j],
1248 rxq->pkt_type_mask);
1250 if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH))
1252 rte_le_to_cpu_32(rxdp[j].qw0.dw1);
1253 else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
1254 mb->hash.fdir.hash =
1255 rte_le_to_cpu_16(rxdp[j].qw0.hi.csum) &
1256 TXGBE_ATR_HASH_MASK;
1258 rte_le_to_cpu_16(rxdp[j].qw0.hi.ipid);
1262 /* Move mbuf pointers from the S/W ring to the stage */
1263 for (j = 0; j < LOOK_AHEAD; ++j)
1264 rxq->rx_stage[i + j] = rxep[j].mbuf;
1266 /* stop if all requested packets could not be received */
1267 if (nb_dd != LOOK_AHEAD)
1271 /* clear software ring entries so we can cleanup correctly */
1272 for (i = 0; i < nb_rx; ++i)
1273 rxq->sw_ring[rxq->rx_tail + i].mbuf = NULL;
1279 txgbe_rx_alloc_bufs(struct txgbe_rx_queue *rxq, bool reset_mbuf)
1281 volatile struct txgbe_rx_desc *rxdp;
1282 struct txgbe_rx_entry *rxep;
1283 struct rte_mbuf *mb;
1288 /* allocate buffers in bulk directly into the S/W ring */
1289 alloc_idx = rxq->rx_free_trigger - (rxq->rx_free_thresh - 1);
1290 rxep = &rxq->sw_ring[alloc_idx];
1291 diag = rte_mempool_get_bulk(rxq->mb_pool, (void *)rxep,
1292 rxq->rx_free_thresh);
1293 if (unlikely(diag != 0))
1296 rxdp = &rxq->rx_ring[alloc_idx];
1297 for (i = 0; i < rxq->rx_free_thresh; ++i) {
1298 /* populate the static rte mbuf fields */
1301 mb->port = rxq->port_id;
1303 rte_mbuf_refcnt_set(mb, 1);
1304 mb->data_off = RTE_PKTMBUF_HEADROOM;
1306 /* populate the descriptors */
1307 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mb));
1308 TXGBE_RXD_HDRADDR(&rxdp[i], 0);
1309 TXGBE_RXD_PKTADDR(&rxdp[i], dma_addr);
1312 /* update state of internal queue structure */
1313 rxq->rx_free_trigger = rxq->rx_free_trigger + rxq->rx_free_thresh;
1314 if (rxq->rx_free_trigger >= rxq->nb_rx_desc)
1315 rxq->rx_free_trigger = rxq->rx_free_thresh - 1;
1321 static inline uint16_t
1322 txgbe_rx_fill_from_stage(struct txgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts,
1325 struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
1328 /* how many packets are ready to return? */
1329 nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
1331 /* copy mbuf pointers to the application's packet list */
1332 for (i = 0; i < nb_pkts; ++i)
1333 rx_pkts[i] = stage[i];
1335 /* update internal queue state */
1336 rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
1337 rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
1342 static inline uint16_t
1343 txgbe_rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1346 struct txgbe_rx_queue *rxq = (struct txgbe_rx_queue *)rx_queue;
1347 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1350 /* Any previously recv'd pkts will be returned from the Rx stage */
1351 if (rxq->rx_nb_avail)
1352 return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1354 /* Scan the H/W ring for packets to receive */
1355 nb_rx = (uint16_t)txgbe_rx_scan_hw_ring(rxq);
1357 /* update internal queue state */
1358 rxq->rx_next_avail = 0;
1359 rxq->rx_nb_avail = nb_rx;
1360 rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
1362 /* if required, allocate new buffers to replenish descriptors */
1363 if (rxq->rx_tail > rxq->rx_free_trigger) {
1364 uint16_t cur_free_trigger = rxq->rx_free_trigger;
1366 if (txgbe_rx_alloc_bufs(rxq, true) != 0) {
1369 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1370 "queue_id=%u", (uint16_t)rxq->port_id,
1371 (uint16_t)rxq->queue_id);
1373 dev->data->rx_mbuf_alloc_failed +=
1374 rxq->rx_free_thresh;
1377 * Need to rewind any previous receives if we cannot
1378 * allocate new buffers to replenish the old ones.
1380 rxq->rx_nb_avail = 0;
1381 rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
1382 for (i = 0, j = rxq->rx_tail; i < nb_rx; ++i, ++j)
1383 rxq->sw_ring[j].mbuf = rxq->rx_stage[i];
1388 /* update tail pointer */
1390 txgbe_set32_relaxed(rxq->rdt_reg_addr, cur_free_trigger);
1393 if (rxq->rx_tail >= rxq->nb_rx_desc)
1396 /* received any packets this loop? */
1397 if (rxq->rx_nb_avail)
1398 return txgbe_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
1403 /* split requests into chunks of size RTE_PMD_TXGBE_RX_MAX_BURST */
1405 txgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1410 if (unlikely(nb_pkts == 0))
1413 if (likely(nb_pkts <= RTE_PMD_TXGBE_RX_MAX_BURST))
1414 return txgbe_rx_recv_pkts(rx_queue, rx_pkts, nb_pkts);
1416 /* request is relatively large, chunk it up */
1421 n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_TXGBE_RX_MAX_BURST);
1422 ret = txgbe_rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n);
1423 nb_rx = (uint16_t)(nb_rx + ret);
1424 nb_pkts = (uint16_t)(nb_pkts - ret);
1433 txgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1436 struct txgbe_rx_queue *rxq;
1437 volatile struct txgbe_rx_desc *rx_ring;
1438 volatile struct txgbe_rx_desc *rxdp;
1439 struct txgbe_rx_entry *sw_ring;
1440 struct txgbe_rx_entry *rxe;
1441 struct rte_mbuf *rxm;
1442 struct rte_mbuf *nmb;
1443 struct txgbe_rx_desc rxd;
1456 rx_id = rxq->rx_tail;
1457 rx_ring = rxq->rx_ring;
1458 sw_ring = rxq->sw_ring;
1459 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1460 while (nb_rx < nb_pkts) {
1462 * The order of operations here is important as the DD status
1463 * bit must not be read after any other descriptor fields.
1464 * rx_ring and rxdp are pointing to volatile data so the order
1465 * of accesses cannot be reordered by the compiler. If they were
1466 * not volatile, they could be reordered which could lead to
1467 * using invalid descriptor fields when read from rxd.
1469 rxdp = &rx_ring[rx_id];
1470 staterr = rxdp->qw1.lo.status;
1471 if (!(staterr & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD)))
1478 * If the TXGBE_RXD_STAT_EOP flag is not set, the RX packet
1479 * is likely to be invalid and to be dropped by the various
1480 * validation checks performed by the network stack.
1482 * Allocate a new mbuf to replenish the RX ring descriptor.
1483 * If the allocation fails:
1484 * - arrange for that RX descriptor to be the first one
1485 * being parsed the next time the receive function is
1486 * invoked [on the same queue].
1488 * - Stop parsing the RX ring and return immediately.
1490 * This policy does not drop the packet received in the RX
1491 * descriptor for which the allocation of a new mbuf failed.
1492 * Thus, it allows that packet to be later retrieved if
1493 * mbufs have been freed in the meantime.
1494 * As a side effect, holding RX descriptors instead of
1495 * systematically giving them back to the NIC may lead to
1496 * RX ring exhaustion situations.
1497 * However, the NIC can gracefully prevent such situations
1498 * to happen by sending specific "back-pressure" flow control
1499 * frames to its peer(s).
1501 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1502 "ext_err_stat=0x%08x pkt_len=%u",
1503 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1504 (uint16_t)rx_id, (uint32_t)staterr,
1505 (uint16_t)rte_le_to_cpu_16(rxd.qw1.hi.len));
1507 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1509 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
1510 "queue_id=%u", (uint16_t)rxq->port_id,
1511 (uint16_t)rxq->queue_id);
1512 dev->data->rx_mbuf_alloc_failed++;
1517 rxe = &sw_ring[rx_id];
1519 if (rx_id == rxq->nb_rx_desc)
1522 /* Prefetch next mbuf while processing current one. */
1523 rte_txgbe_prefetch(sw_ring[rx_id].mbuf);
1526 * When next RX descriptor is on a cache-line boundary,
1527 * prefetch the next 4 RX descriptors and the next 8 pointers
1530 if ((rx_id & 0x3) == 0) {
1531 rte_txgbe_prefetch(&rx_ring[rx_id]);
1532 rte_txgbe_prefetch(&sw_ring[rx_id]);
1537 dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1538 TXGBE_RXD_HDRADDR(rxdp, 0);
1539 TXGBE_RXD_PKTADDR(rxdp, dma_addr);
1542 * Initialize the returned mbuf.
1543 * 1) setup generic mbuf fields:
1544 * - number of segments,
1547 * - RX port identifier.
1548 * 2) integrate hardware offload data, if any:
1549 * - RSS flag & hash,
1550 * - IP checksum flag,
1551 * - VLAN TCI, if any,
1554 pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.qw1.hi.len) -
1556 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1557 rte_packet_prefetch((char *)rxm->buf_addr + rxm->data_off);
1560 rxm->pkt_len = pkt_len;
1561 rxm->data_len = pkt_len;
1562 rxm->port = rxq->port_id;
1564 pkt_info = rte_le_to_cpu_32(rxd.qw0.dw0);
1565 /* Only valid if RTE_MBUF_F_RX_VLAN set in pkt_flags */
1566 rxm->vlan_tci = rte_le_to_cpu_16(rxd.qw1.hi.tag);
1568 pkt_flags = rx_desc_status_to_pkt_flags(staterr,
1570 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1571 pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1572 rxm->ol_flags = pkt_flags;
1573 rxm->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1574 rxq->pkt_type_mask);
1576 if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
1577 rxm->hash.rss = rte_le_to_cpu_32(rxd.qw0.dw1);
1578 } else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
1579 rxm->hash.fdir.hash =
1580 rte_le_to_cpu_16(rxd.qw0.hi.csum) &
1581 TXGBE_ATR_HASH_MASK;
1582 rxm->hash.fdir.id = rte_le_to_cpu_16(rxd.qw0.hi.ipid);
1585 * Store the mbuf address into the next entry of the array
1586 * of returned packets.
1588 rx_pkts[nb_rx++] = rxm;
1590 rxq->rx_tail = rx_id;
1593 * If the number of free RX descriptors is greater than the RX free
1594 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1596 * Update the RDT with the value of the last processed RX descriptor
1597 * minus 1, to guarantee that the RDT register is never equal to the
1598 * RDH register, which creates a "full" ring situation from the
1599 * hardware point of view...
1601 nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
1602 if (nb_hold > rxq->rx_free_thresh) {
1603 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1604 "nb_hold=%u nb_rx=%u",
1605 (uint16_t)rxq->port_id, (uint16_t)rxq->queue_id,
1606 (uint16_t)rx_id, (uint16_t)nb_hold,
1608 rx_id = (uint16_t)((rx_id == 0) ?
1609 (rxq->nb_rx_desc - 1) : (rx_id - 1));
1610 txgbe_set32(rxq->rdt_reg_addr, rx_id);
1613 rxq->nb_rx_hold = nb_hold;
1618 * txgbe_fill_cluster_head_buf - fill the first mbuf of the returned packet
1620 * Fill the following info in the HEAD buffer of the Rx cluster:
1621 * - RX port identifier
1622 * - hardware offload data, if any:
1624 * - IP checksum flag
1625 * - VLAN TCI, if any
1627 * @head HEAD of the packet cluster
1628 * @desc HW descriptor to get data from
1629 * @rxq Pointer to the Rx queue
1632 txgbe_fill_cluster_head_buf(struct rte_mbuf *head, struct txgbe_rx_desc *desc,
1633 struct txgbe_rx_queue *rxq, uint32_t staterr)
1638 head->port = rxq->port_id;
1640 /* The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
1641 * set in the pkt_flags field.
1643 head->vlan_tci = rte_le_to_cpu_16(desc->qw1.hi.tag);
1644 pkt_info = rte_le_to_cpu_32(desc->qw0.dw0);
1645 pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags);
1646 pkt_flags |= rx_desc_error_to_pkt_flags(staterr);
1647 pkt_flags |= txgbe_rxd_pkt_info_to_pkt_flags(pkt_info);
1648 head->ol_flags = pkt_flags;
1649 head->packet_type = txgbe_rxd_pkt_info_to_pkt_type(pkt_info,
1650 rxq->pkt_type_mask);
1652 if (likely(pkt_flags & RTE_MBUF_F_RX_RSS_HASH)) {
1653 head->hash.rss = rte_le_to_cpu_32(desc->qw0.dw1);
1654 } else if (pkt_flags & RTE_MBUF_F_RX_FDIR) {
1655 head->hash.fdir.hash = rte_le_to_cpu_16(desc->qw0.hi.csum)
1656 & TXGBE_ATR_HASH_MASK;
1657 head->hash.fdir.id = rte_le_to_cpu_16(desc->qw0.hi.ipid);
1662 * txgbe_recv_pkts_lro - receive handler for the scattered/LRO case.
1664 * @rx_queue Rx queue handle
1665 * @rx_pkts table of received packets
1666 * @nb_pkts size of rx_pkts table
1667 * @bulk_alloc if TRUE, bulk allocation is used for HW ring refilling
1669 * Handles the Rx HW ring completions when RSC feature is configured. Uses an
1670 * additional ring of txgbe_rsc_entry's that will hold the relevant RSC info.
1672 * We use the same logic as in Linux and in FreeBSD txgbe drivers:
1673 * 1) When non-EOP RSC completion arrives:
1674 * a) Update the HEAD of the current RSC aggregation cluster with the new
1675 * segment's data length.
1676 * b) Set the "next" pointer of the current segment to point to the segment
1677 * at the NEXTP index.
1678 * c) Pass the HEAD of RSC aggregation cluster on to the next NEXTP entry
1679 * in the sw_rsc_ring.
1680 * 2) When EOP arrives we just update the cluster's total length and offload
1681 * flags and deliver the cluster up to the upper layers. In our case - put it
1682 * in the rx_pkts table.
1684 * Returns the number of received packets/clusters (according to the "bulk
1685 * receive" interface).
1687 static inline uint16_t
1688 txgbe_recv_pkts_lro(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
1691 struct txgbe_rx_queue *rxq = rx_queue;
1692 struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
1693 volatile struct txgbe_rx_desc *rx_ring = rxq->rx_ring;
1694 struct txgbe_rx_entry *sw_ring = rxq->sw_ring;
1695 struct txgbe_scattered_rx_entry *sw_sc_ring = rxq->sw_sc_ring;
1696 uint16_t rx_id = rxq->rx_tail;
1698 uint16_t nb_hold = rxq->nb_rx_hold;
1699 uint16_t prev_id = rxq->rx_tail;
1701 while (nb_rx < nb_pkts) {
1703 struct txgbe_rx_entry *rxe;
1704 struct txgbe_scattered_rx_entry *sc_entry;
1705 struct txgbe_scattered_rx_entry *next_sc_entry = NULL;
1706 struct txgbe_rx_entry *next_rxe = NULL;
1707 struct rte_mbuf *first_seg;
1708 struct rte_mbuf *rxm;
1709 struct rte_mbuf *nmb = NULL;
1710 struct txgbe_rx_desc rxd;
1713 volatile struct txgbe_rx_desc *rxdp;
1718 * The code in this whole file uses the volatile pointer to
1719 * ensure the read ordering of the status and the rest of the
1720 * descriptor fields (on the compiler level only!!!). This is so
1721 * UGLY - why not to just use the compiler barrier instead? DPDK
1722 * even has the rte_compiler_barrier() for that.
1724 * But most importantly this is just wrong because this doesn't
1725 * ensure memory ordering in a general case at all. For
1726 * instance, DPDK is supposed to work on Power CPUs where
1727 * compiler barrier may just not be enough!
1729 * I tried to write only this function properly to have a
1730 * starting point (as a part of an LRO/RSC series) but the
1731 * compiler cursed at me when I tried to cast away the
1732 * "volatile" from rx_ring (yes, it's volatile too!!!). So, I'm
1733 * keeping it the way it is for now.
1735 * The code in this file is broken in so many other places and
1736 * will just not work on a big endian CPU anyway therefore the
1737 * lines below will have to be revisited together with the rest
1741 * - Get rid of "volatile" and let the compiler do its job.
1742 * - Use the proper memory barrier (rte_rmb()) to ensure the
1743 * memory ordering below.
1745 rxdp = &rx_ring[rx_id];
1746 staterr = rte_le_to_cpu_32(rxdp->qw1.lo.status);
1748 if (!(staterr & TXGBE_RXD_STAT_DD))
1753 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_id=%u "
1754 "staterr=0x%x data_len=%u",
1755 rxq->port_id, rxq->queue_id, rx_id, staterr,
1756 rte_le_to_cpu_16(rxd.qw1.hi.len));
1759 nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
1761 PMD_RX_LOG(DEBUG, "RX mbuf alloc failed "
1762 "port_id=%u queue_id=%u",
1763 rxq->port_id, rxq->queue_id);
1765 dev->data->rx_mbuf_alloc_failed++;
1768 } else if (nb_hold > rxq->rx_free_thresh) {
1769 uint16_t next_rdt = rxq->rx_free_trigger;
1771 if (!txgbe_rx_alloc_bufs(rxq, false)) {
1773 txgbe_set32_relaxed(rxq->rdt_reg_addr,
1775 nb_hold -= rxq->rx_free_thresh;
1777 PMD_RX_LOG(DEBUG, "RX bulk alloc failed "
1778 "port_id=%u queue_id=%u",
1779 rxq->port_id, rxq->queue_id);
1781 dev->data->rx_mbuf_alloc_failed++;
1787 rxe = &sw_ring[rx_id];
1788 eop = staterr & TXGBE_RXD_STAT_EOP;
1790 next_id = rx_id + 1;
1791 if (next_id == rxq->nb_rx_desc)
1794 /* Prefetch next mbuf while processing current one. */
1795 rte_txgbe_prefetch(sw_ring[next_id].mbuf);
1798 * When next RX descriptor is on a cache-line boundary,
1799 * prefetch the next 4 RX descriptors and the next 4 pointers
1802 if ((next_id & 0x3) == 0) {
1803 rte_txgbe_prefetch(&rx_ring[next_id]);
1804 rte_txgbe_prefetch(&sw_ring[next_id]);
1811 rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
1813 * Update RX descriptor with the physical address of the
1814 * new data buffer of the new allocated mbuf.
1818 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1819 TXGBE_RXD_HDRADDR(rxdp, 0);
1820 TXGBE_RXD_PKTADDR(rxdp, dma);
1826 * Set data length & data buffer address of mbuf.
1828 data_len = rte_le_to_cpu_16(rxd.qw1.hi.len);
1829 rxm->data_len = data_len;
1834 * Get next descriptor index:
1835 * - For RSC it's in the NEXTP field.
1836 * - For a scattered packet - it's just a following
1839 if (TXGBE_RXD_RSCCNT(rxd.qw0.dw0))
1840 nextp_id = TXGBE_RXD_NEXTP(staterr);
1844 next_sc_entry = &sw_sc_ring[nextp_id];
1845 next_rxe = &sw_ring[nextp_id];
1846 rte_txgbe_prefetch(next_rxe);
1849 sc_entry = &sw_sc_ring[rx_id];
1850 first_seg = sc_entry->fbuf;
1851 sc_entry->fbuf = NULL;
1854 * If this is the first buffer of the received packet,
1855 * set the pointer to the first mbuf of the packet and
1856 * initialize its context.
1857 * Otherwise, update the total length and the number of segments
1858 * of the current scattered packet, and update the pointer to
1859 * the last mbuf of the current packet.
1861 if (first_seg == NULL) {
1863 first_seg->pkt_len = data_len;
1864 first_seg->nb_segs = 1;
1866 first_seg->pkt_len += data_len;
1867 first_seg->nb_segs++;
1874 * If this is not the last buffer of the received packet, update
1875 * the pointer to the first mbuf at the NEXTP entry in the
1876 * sw_sc_ring and continue to parse the RX ring.
1878 if (!eop && next_rxe) {
1879 rxm->next = next_rxe->mbuf;
1880 next_sc_entry->fbuf = first_seg;
1884 /* Initialize the first mbuf of the returned packet */
1885 txgbe_fill_cluster_head_buf(first_seg, &rxd, rxq, staterr);
1888 * Deal with the case when HW CRC strip is disabled.
1889 * That can't happen when LRO is enabled, but still could
1890 * happen for scattered RX mode.
1892 first_seg->pkt_len -= rxq->crc_len;
1893 if (unlikely(rxm->data_len <= rxq->crc_len)) {
1894 struct rte_mbuf *lp;
1896 for (lp = first_seg; lp->next != rxm; lp = lp->next)
1899 first_seg->nb_segs--;
1900 lp->data_len -= rxq->crc_len - rxm->data_len;
1902 rte_pktmbuf_free_seg(rxm);
1904 rxm->data_len -= rxq->crc_len;
1907 /* Prefetch data of first segment, if configured to do so. */
1908 rte_packet_prefetch((char *)first_seg->buf_addr +
1909 first_seg->data_off);
1912 * Store the mbuf address into the next entry of the array
1913 * of returned packets.
1915 rx_pkts[nb_rx++] = first_seg;
1919 * Record index of the next RX descriptor to probe.
1921 rxq->rx_tail = rx_id;
1924 * If the number of free RX descriptors is greater than the RX free
1925 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
1927 * Update the RDT with the value of the last processed RX descriptor
1928 * minus 1, to guarantee that the RDT register is never equal to the
1929 * RDH register, which creates a "full" ring situation from the
1930 * hardware point of view...
1932 if (!bulk_alloc && nb_hold > rxq->rx_free_thresh) {
1933 PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
1934 "nb_hold=%u nb_rx=%u",
1935 rxq->port_id, rxq->queue_id, rx_id, nb_hold, nb_rx);
1938 txgbe_set32_relaxed(rxq->rdt_reg_addr, prev_id);
1942 rxq->nb_rx_hold = nb_hold;
1947 txgbe_recv_pkts_lro_single_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1950 return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, false);
1954 txgbe_recv_pkts_lro_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts,
1957 return txgbe_recv_pkts_lro(rx_queue, rx_pkts, nb_pkts, true);
1961 txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev __rte_unused)
1963 return RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
1967 txgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
1970 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
1971 struct rte_eth_dev_sriov *sriov = &RTE_ETH_DEV_SRIOV(dev);
1973 offloads = RTE_ETH_RX_OFFLOAD_IPV4_CKSUM |
1974 RTE_ETH_RX_OFFLOAD_UDP_CKSUM |
1975 RTE_ETH_RX_OFFLOAD_TCP_CKSUM |
1976 RTE_ETH_RX_OFFLOAD_KEEP_CRC |
1977 RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1978 RTE_ETH_RX_OFFLOAD_RSS_HASH |
1979 RTE_ETH_RX_OFFLOAD_SCATTER;
1981 if (!txgbe_is_vf(dev))
1982 offloads |= (RTE_ETH_RX_OFFLOAD_VLAN_FILTER |
1983 RTE_ETH_RX_OFFLOAD_QINQ_STRIP |
1984 RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
1987 * RSC is only supported by PF devices in a non-SR-IOV
1990 if (hw->mac.type == txgbe_mac_raptor && !sriov->active)
1991 offloads |= RTE_ETH_RX_OFFLOAD_TCP_LRO;
1993 if (hw->mac.type == txgbe_mac_raptor)
1994 offloads |= RTE_ETH_RX_OFFLOAD_MACSEC_STRIP;
1996 offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1998 #ifdef RTE_LIB_SECURITY
1999 if (dev->security_ctx)
2000 offloads |= RTE_ETH_RX_OFFLOAD_SECURITY;
2006 static void __rte_cold
2007 txgbe_tx_queue_release_mbufs(struct txgbe_tx_queue *txq)
2011 if (txq->sw_ring != NULL) {
2012 for (i = 0; i < txq->nb_tx_desc; i++) {
2013 if (txq->sw_ring[i].mbuf != NULL) {
2014 rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
2015 txq->sw_ring[i].mbuf = NULL;
2022 txgbe_tx_done_cleanup_full(struct txgbe_tx_queue *txq, uint32_t free_cnt)
2024 struct txgbe_tx_entry *swr_ring = txq->sw_ring;
2025 uint16_t i, tx_last, tx_id;
2026 uint16_t nb_tx_free_last;
2027 uint16_t nb_tx_to_clean;
2030 /* Start free mbuf from the next of tx_tail */
2031 tx_last = txq->tx_tail;
2032 tx_id = swr_ring[tx_last].next_id;
2034 if (txq->nb_tx_free == 0 && txgbe_xmit_cleanup(txq))
2037 nb_tx_to_clean = txq->nb_tx_free;
2038 nb_tx_free_last = txq->nb_tx_free;
2040 free_cnt = txq->nb_tx_desc;
2042 /* Loop through swr_ring to count the number of
2043 * freeable mbufs and packets.
2045 for (pkt_cnt = 0; pkt_cnt < free_cnt; ) {
2046 for (i = 0; i < nb_tx_to_clean &&
2047 pkt_cnt < free_cnt &&
2048 tx_id != tx_last; i++) {
2049 if (swr_ring[tx_id].mbuf != NULL) {
2050 rte_pktmbuf_free_seg(swr_ring[tx_id].mbuf);
2051 swr_ring[tx_id].mbuf = NULL;
2054 * last segment in the packet,
2055 * increment packet count
2057 pkt_cnt += (swr_ring[tx_id].last_id == tx_id);
2060 tx_id = swr_ring[tx_id].next_id;
2063 if (pkt_cnt < free_cnt) {
2064 if (txgbe_xmit_cleanup(txq))
2067 nb_tx_to_clean = txq->nb_tx_free - nb_tx_free_last;
2068 nb_tx_free_last = txq->nb_tx_free;
2072 return (int)pkt_cnt;
2076 txgbe_tx_done_cleanup_simple(struct txgbe_tx_queue *txq,
2081 if (free_cnt == 0 || free_cnt > txq->nb_tx_desc)
2082 free_cnt = txq->nb_tx_desc;
2084 cnt = free_cnt - free_cnt % txq->tx_free_thresh;
2086 for (i = 0; i < cnt; i += n) {
2087 if (txq->nb_tx_desc - txq->nb_tx_free < txq->tx_free_thresh)
2090 n = txgbe_tx_free_bufs(txq);
2100 txgbe_dev_tx_done_cleanup(void *tx_queue, uint32_t free_cnt)
2102 struct txgbe_tx_queue *txq = (struct txgbe_tx_queue *)tx_queue;
2103 if (txq->offloads == 0 &&
2104 #ifdef RTE_LIB_SECURITY
2105 !(txq->using_ipsec) &&
2107 txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
2108 return txgbe_tx_done_cleanup_simple(txq, free_cnt);
2110 return txgbe_tx_done_cleanup_full(txq, free_cnt);
2113 static void __rte_cold
2114 txgbe_tx_free_swring(struct txgbe_tx_queue *txq)
2117 txq->sw_ring != NULL)
2118 rte_free(txq->sw_ring);
2121 static void __rte_cold
2122 txgbe_tx_queue_release(struct txgbe_tx_queue *txq)
2124 if (txq != NULL && txq->ops != NULL) {
2125 txq->ops->release_mbufs(txq);
2126 txq->ops->free_swring(txq);
2132 txgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2134 txgbe_tx_queue_release(dev->data->tx_queues[qid]);
2137 /* (Re)set dynamic txgbe_tx_queue fields to defaults */
2138 static void __rte_cold
2139 txgbe_reset_tx_queue(struct txgbe_tx_queue *txq)
2141 static const struct txgbe_tx_desc zeroed_desc = {0};
2142 struct txgbe_tx_entry *txe = txq->sw_ring;
2145 /* Zero out HW ring memory */
2146 for (i = 0; i < txq->nb_tx_desc; i++)
2147 txq->tx_ring[i] = zeroed_desc;
2149 /* Initialize SW ring entries */
2150 prev = (uint16_t)(txq->nb_tx_desc - 1);
2151 for (i = 0; i < txq->nb_tx_desc; i++) {
2152 volatile struct txgbe_tx_desc *txd = &txq->tx_ring[i];
2154 txd->dw3 = rte_cpu_to_le_32(TXGBE_TXD_DD);
2157 txe[prev].next_id = i;
2161 txq->tx_next_dd = (uint16_t)(txq->tx_free_thresh - 1);
2165 * Always allow 1 descriptor to be un-allocated to avoid
2166 * a H/W race condition
2168 txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1);
2169 txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1);
2171 memset((void *)&txq->ctx_cache, 0,
2172 TXGBE_CTX_NUM * sizeof(struct txgbe_ctx_info));
2175 static const struct txgbe_txq_ops def_txq_ops = {
2176 .release_mbufs = txgbe_tx_queue_release_mbufs,
2177 .free_swring = txgbe_tx_free_swring,
2178 .reset = txgbe_reset_tx_queue,
2181 /* Takes an ethdev and a queue and sets up the tx function to be used based on
2182 * the queue parameters. Used in tx_queue_setup by primary process and then
2183 * in dev_init by secondary process when attaching to an existing ethdev.
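 *
 * The simple burst function is only selected when the queue has no Tx
 * offloads, is not using inline IPsec and tx_free_thresh is at least
 * RTE_PMD_TXGBE_TX_MAX_BURST; otherwise the full-featured path with
 * txgbe_prep_pkts() is used.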
2186 txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq)
2188 /* Use a simple Tx queue (no offloads, no multi segs) if possible */
2189 if (txq->offloads == 0 &&
2190 #ifdef RTE_LIB_SECURITY
2191 !(txq->using_ipsec) &&
2193 txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST) {
2194 PMD_INIT_LOG(DEBUG, "Using simple tx code path");
2195 dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
2196 dev->tx_pkt_prepare = NULL;
2198 PMD_INIT_LOG(DEBUG, "Using full-featured tx code path");
2200 " - offloads = 0x%" PRIx64,
2203 " - tx_free_thresh = %lu [RTE_PMD_TXGBE_TX_MAX_BURST=%lu]",
2204 (unsigned long)txq->tx_free_thresh,
2205 (unsigned long)RTE_PMD_TXGBE_TX_MAX_BURST);
2206 dev->tx_pkt_burst = txgbe_xmit_pkts;
2207 dev->tx_pkt_prepare = txgbe_prep_pkts;
2212 txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev)
2220 txgbe_get_tx_port_offloads(struct rte_eth_dev *dev)
2222 uint64_t tx_offload_capa;
2225 RTE_ETH_TX_OFFLOAD_VLAN_INSERT |
2226 RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
2227 RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
2228 RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
2229 RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |
2230 RTE_ETH_TX_OFFLOAD_TCP_TSO |
2231 RTE_ETH_TX_OFFLOAD_UDP_TSO |
2232 RTE_ETH_TX_OFFLOAD_UDP_TNL_TSO |
2233 RTE_ETH_TX_OFFLOAD_IP_TNL_TSO |
2234 RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
2235 RTE_ETH_TX_OFFLOAD_GRE_TNL_TSO |
2236 RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO |
2237 RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO |
2238 RTE_ETH_TX_OFFLOAD_MULTI_SEGS;
2240 if (!txgbe_is_vf(dev))
2241 tx_offload_capa |= RTE_ETH_TX_OFFLOAD_QINQ_INSERT;
2243 tx_offload_capa |= RTE_ETH_TX_OFFLOAD_MACSEC_INSERT;
2245 tx_offload_capa |= RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM |
2246 RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM;
2248 #ifdef RTE_LIB_SECURITY
2249 if (dev->security_ctx)
2250 tx_offload_capa |= RTE_ETH_TX_OFFLOAD_SECURITY;
2252 return tx_offload_capa;
2256 txgbe_dev_tx_queue_setup(struct rte_eth_dev *dev,
2259 unsigned int socket_id,
2260 const struct rte_eth_txconf *tx_conf)
2262 const struct rte_memzone *tz;
2263 struct txgbe_tx_queue *txq;
2264 struct txgbe_hw *hw;
2265 uint16_t tx_free_thresh;
2268 PMD_INIT_FUNC_TRACE();
2269 hw = TXGBE_DEV_HW(dev);
2271 offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads;
2274 * Validate number of transmit descriptors.
2275 * It must not exceed hardware maximum, and must be a multiple
2276 * of TXGBE_TXD_ALIGN.
2278 if (nb_desc % TXGBE_TXD_ALIGN != 0 ||
2279 nb_desc > TXGBE_RING_DESC_MAX ||
2280 nb_desc < TXGBE_RING_DESC_MIN) {
2285 * The TX descriptor ring will be cleaned after txq->tx_free_thresh
2286 * descriptors are used or if the number of descriptors required
2287 * to transmit a packet is greater than the number of free TX
2288 * descriptors.
2289 * One descriptor in the TX ring is used as a sentinel to avoid a
2290 * H/W race condition, hence the maximum threshold constraints.
2291 * When set to zero use default values.
2293 tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
2294 tx_conf->tx_free_thresh : DEFAULT_TX_FREE_THRESH);
2295 if (tx_free_thresh >= (nb_desc - 3)) {
2296 PMD_INIT_LOG(ERR, "tx_free_thresh must be less than the number of "
2297 "TX descriptors minus 3. (tx_free_thresh=%u "
2298 "port=%d queue=%d)",
2299 (unsigned int)tx_free_thresh,
2300 (int)dev->data->port_id, (int)queue_idx);
2304 if ((nb_desc % tx_free_thresh) != 0) {
2305 PMD_INIT_LOG(ERR, "tx_free_thresh must be a divisor of the "
2306 "number of TX descriptors. (tx_free_thresh=%u "
2307 "port=%d queue=%d)", (unsigned int)tx_free_thresh,
2308 (int)dev->data->port_id, (int)queue_idx);
2312 /* Free memory prior to re-allocation if needed... */
2313 if (dev->data->tx_queues[queue_idx] != NULL) {
2314 txgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
2315 dev->data->tx_queues[queue_idx] = NULL;
2318 /* First allocate the tx queue data structure */
2319 txq = rte_zmalloc_socket("ethdev TX queue",
2320 sizeof(struct txgbe_tx_queue),
2321 RTE_CACHE_LINE_SIZE, socket_id);
2326 * Allocate TX ring hardware descriptors. A memzone large enough to
2327 * handle the maximum ring size is allocated in order to allow for
2328 * resizing in later calls to the queue setup function.
2330 tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
2331 sizeof(struct txgbe_tx_desc) * TXGBE_RING_DESC_MAX,
2332 TXGBE_ALIGN, socket_id);
2334 txgbe_tx_queue_release(txq);
2338 txq->nb_tx_desc = nb_desc;
2339 txq->tx_free_thresh = tx_free_thresh;
2340 txq->pthresh = tx_conf->tx_thresh.pthresh;
2341 txq->hthresh = tx_conf->tx_thresh.hthresh;
2342 txq->wthresh = tx_conf->tx_thresh.wthresh;
2343 txq->queue_id = queue_idx;
2344 txq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2345 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2346 txq->port_id = dev->data->port_id;
2347 txq->offloads = offloads;
2348 txq->ops = &def_txq_ops;
2349 txq->tx_deferred_start = tx_conf->tx_deferred_start;
2350 #ifdef RTE_LIB_SECURITY
2351 txq->using_ipsec = !!(dev->data->dev_conf.txmode.offloads &
2352 RTE_ETH_TX_OFFLOAD_SECURITY);
2355 /* Use the VF-specific register offsets for the tail and config
2356 * pointers when a virtual function is detected.
2358 if (hw->mac.type == txgbe_mac_raptor_vf) {
2359 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXWP(queue_idx));
2360 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw, TXGBE_TXCFG(queue_idx));
2362 txq->tdt_reg_addr = TXGBE_REG_ADDR(hw,
2363 TXGBE_TXWP(txq->reg_idx));
2364 txq->tdc_reg_addr = TXGBE_REG_ADDR(hw,
2365 TXGBE_TXCFG(txq->reg_idx));
2368 txq->tx_ring_phys_addr = TMZ_PADDR(tz);
2369 txq->tx_ring = (struct txgbe_tx_desc *)TMZ_VADDR(tz);
2371 /* Allocate software ring */
2372 txq->sw_ring = rte_zmalloc_socket("txq->sw_ring",
2373 sizeof(struct txgbe_tx_entry) * nb_desc,
2374 RTE_CACHE_LINE_SIZE, socket_id);
2375 if (txq->sw_ring == NULL) {
2376 txgbe_tx_queue_release(txq);
2379 PMD_INIT_LOG(DEBUG, "sw_ring=%p hw_ring=%p dma_addr=0x%" PRIx64,
2380 txq->sw_ring, txq->tx_ring, txq->tx_ring_phys_addr);
2382 /* set up scalar TX function as appropriate */
2383 txgbe_set_tx_function(dev, txq);
2385 txq->ops->reset(txq);
2387 dev->data->tx_queues[queue_idx] = txq;
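/*
 * A hypothetical application-side sketch of the constraints enforced above
 * (nb_desc within ring limits and aligned to TXGBE_TXD_ALIGN, tx_free_thresh
 * dividing nb_desc and staying below nb_desc - 3); in practice applications
 * usually start from dev_info.default_txconf:
 *
 *     struct rte_eth_txconf txconf = { .tx_free_thresh = 32 };
 *     // 512 % 32 == 0 and 32 < 512 - 3, so the checks above pass
 *     // (512 assumed to satisfy the TXGBE_TXD_ALIGN multiple check)
 *     int rc = rte_eth_tx_queue_setup(0, 0, 512, rte_socket_id(), &txconf);
 */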
2393 * txgbe_free_sc_cluster - free the not-yet-completed scattered cluster
2395 * The "next" pointer of the last segment of (not-yet-completed) RSC clusters
2396 * in the sw_rsc_ring is not set to NULL but rather points to the next
2397 * mbuf of this RSC aggregation (that has not been completed yet and still
2398 * resides on the HW ring). So, instead of calling rte_pktmbuf_free(), we
2399 * just free the first "nb_segs" segments of the cluster explicitly by
2400 * calling rte_pktmbuf_free_seg() on each of them.
2402 * @m scattered cluster head
2404 static void __rte_cold
2405 txgbe_free_sc_cluster(struct rte_mbuf *m)
2407 uint16_t i, nb_segs = m->nb_segs;
2408 struct rte_mbuf *next_seg;
2410 for (i = 0; i < nb_segs; i++) {
2411 next_seg = m->next;
2412 rte_pktmbuf_free_seg(m);
2413 m = next_seg;
2417 static void __rte_cold
2418 txgbe_rx_queue_release_mbufs(struct txgbe_rx_queue *rxq)
2422 if (rxq->sw_ring != NULL) {
2423 for (i = 0; i < rxq->nb_rx_desc; i++) {
2424 if (rxq->sw_ring[i].mbuf != NULL) {
2425 rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
2426 rxq->sw_ring[i].mbuf = NULL;
2429 if (rxq->rx_nb_avail) {
2430 for (i = 0; i < rxq->rx_nb_avail; ++i) {
2431 struct rte_mbuf *mb;
2433 mb = rxq->rx_stage[rxq->rx_next_avail + i];
2434 rte_pktmbuf_free_seg(mb);
2436 rxq->rx_nb_avail = 0;
2440 if (rxq->sw_sc_ring)
2441 for (i = 0; i < rxq->nb_rx_desc; i++)
2442 if (rxq->sw_sc_ring[i].fbuf) {
2443 txgbe_free_sc_cluster(rxq->sw_sc_ring[i].fbuf);
2444 rxq->sw_sc_ring[i].fbuf = NULL;
2448 static void __rte_cold
2449 txgbe_rx_queue_release(struct txgbe_rx_queue *rxq)
2452 txgbe_rx_queue_release_mbufs(rxq);
2453 rte_free(rxq->sw_ring);
2454 rte_free(rxq->sw_sc_ring);
2460 txgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
2462 txgbe_rx_queue_release(dev->data->rx_queues[qid]);
2466 * Check if Rx Burst Bulk Alloc function can be used.
2468 * 0: the preconditions are satisfied and the bulk allocation function
2470 * -EINVAL: the preconditions are NOT satisfied and the default Rx burst
2471 * function must be used.
2473 static inline int __rte_cold
2474 check_rx_burst_bulk_alloc_preconditions(struct txgbe_rx_queue *rxq)
2479 * Make sure the following pre-conditions are satisfied:
2480 * rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST
2481 * rxq->rx_free_thresh < rxq->nb_rx_desc
2482 * (rxq->nb_rx_desc % rxq->rx_free_thresh) == 0
2483 * Scattered packets are not supported. This should be checked
2484 * outside of this function.
2486 if (!(rxq->rx_free_thresh >= RTE_PMD_TXGBE_RX_MAX_BURST)) {
2487 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2488 "rxq->rx_free_thresh=%d, "
2489 "RTE_PMD_TXGBE_RX_MAX_BURST=%d",
2490 rxq->rx_free_thresh, RTE_PMD_TXGBE_RX_MAX_BURST);
2492 } else if (!(rxq->rx_free_thresh < rxq->nb_rx_desc)) {
2493 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2494 "rxq->rx_free_thresh=%d, "
2495 "rxq->nb_rx_desc=%d",
2496 rxq->rx_free_thresh, rxq->nb_rx_desc);
2498 } else if (!((rxq->nb_rx_desc % rxq->rx_free_thresh) == 0)) {
2499 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
2500 "rxq->nb_rx_desc=%d, "
2501 "rxq->rx_free_thresh=%d",
2502 rxq->nb_rx_desc, rxq->rx_free_thresh);
2509 /* Reset dynamic txgbe_rx_queue fields back to defaults */
2510 static void __rte_cold
2511 txgbe_reset_rx_queue(struct txgbe_adapter *adapter, struct txgbe_rx_queue *rxq)
2513 static const struct txgbe_rx_desc zeroed_desc = {
2514 {{0}, {0} }, {{0}, {0} } };
2516 uint16_t len = rxq->nb_rx_desc;
2519 * By default, the Rx queue setup function allocates enough memory for
2520 * TXGBE_RING_DESC_MAX. The Rx Burst bulk allocation function requires
2521 * extra memory at the end of the descriptor ring to be zero'd out.
2523 if (adapter->rx_bulk_alloc_allowed)
2524 /* zero out extra memory */
2525 len += RTE_PMD_TXGBE_RX_MAX_BURST;
2528 * Zero out HW ring memory. Zero out extra memory at the end of
2529 * the H/W ring so look-ahead logic in Rx Burst bulk alloc function
2530 * reads extra memory as zeros.
2532 for (i = 0; i < len; i++)
2533 rxq->rx_ring[i] = zeroed_desc;
2536 * initialize extra software ring entries. Space for these extra
2537 * entries is always allocated
2539 memset(&rxq->fake_mbuf, 0x0, sizeof(rxq->fake_mbuf));
2540 for (i = rxq->nb_rx_desc; i < len; ++i)
2541 rxq->sw_ring[i].mbuf = &rxq->fake_mbuf;
2543 rxq->rx_nb_avail = 0;
2544 rxq->rx_next_avail = 0;
2545 rxq->rx_free_trigger = (uint16_t)(rxq->rx_free_thresh - 1);
2547 rxq->nb_rx_hold = 0;
2548 rxq->pkt_first_seg = NULL;
2549 rxq->pkt_last_seg = NULL;
2553 txgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
2556 unsigned int socket_id,
2557 const struct rte_eth_rxconf *rx_conf,
2558 struct rte_mempool *mp)
2560 const struct rte_memzone *rz;
2561 struct txgbe_rx_queue *rxq;
2562 struct txgbe_hw *hw;
2564 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2567 PMD_INIT_FUNC_TRACE();
2568 hw = TXGBE_DEV_HW(dev);
2570 offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
2573 * Validate number of receive descriptors.
2574 * It must not exceed hardware maximum, and must be a multiple
2575 * of TXGBE_RXD_ALIGN.
2577 if (nb_desc % TXGBE_RXD_ALIGN != 0 ||
2578 nb_desc > TXGBE_RING_DESC_MAX ||
2579 nb_desc < TXGBE_RING_DESC_MIN) {
2583 /* Free memory prior to re-allocation if needed... */
2584 if (dev->data->rx_queues[queue_idx] != NULL) {
2585 txgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
2586 dev->data->rx_queues[queue_idx] = NULL;
2589 /* First allocate the rx queue data structure */
2590 rxq = rte_zmalloc_socket("ethdev RX queue",
2591 sizeof(struct txgbe_rx_queue),
2592 RTE_CACHE_LINE_SIZE, socket_id);
2596 rxq->nb_rx_desc = nb_desc;
2597 rxq->rx_free_thresh = rx_conf->rx_free_thresh;
2598 rxq->queue_id = queue_idx;
2599 rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
2600 queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
2601 rxq->port_id = dev->data->port_id;
2602 if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
2603 rxq->crc_len = RTE_ETHER_CRC_LEN;
2606 rxq->drop_en = rx_conf->rx_drop_en;
2607 rxq->rx_deferred_start = rx_conf->rx_deferred_start;
2608 rxq->offloads = offloads;
2611 * The packet type field in the RX descriptor differs between NIC
2612 * families, so set the mask that matches this device.
2614 rxq->pkt_type_mask = TXGBE_PTID_MASK;
2617 * Allocate RX ring hardware descriptors. A memzone large enough to
2618 * handle the maximum ring size is allocated in order to allow for
2619 * resizing in later calls to the queue setup function.
2621 rz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
2622 RX_RING_SZ, TXGBE_ALIGN, socket_id);
2624 txgbe_rx_queue_release(rxq);
2629 * Zero init all the descriptors in the ring.
2631 memset(rz->addr, 0, RX_RING_SZ);
2634 * Use the VF register offsets for the Rx queue pointers when running as a Virtual Function
2636 if (hw->mac.type == txgbe_mac_raptor_vf) {
2638 TXGBE_REG_ADDR(hw, TXGBE_RXWP(queue_idx));
2640 TXGBE_REG_ADDR(hw, TXGBE_RXRP(queue_idx));
2643 TXGBE_REG_ADDR(hw, TXGBE_RXWP(rxq->reg_idx));
2645 TXGBE_REG_ADDR(hw, TXGBE_RXRP(rxq->reg_idx));
2648 rxq->rx_ring_phys_addr = TMZ_PADDR(rz);
2649 rxq->rx_ring = (struct txgbe_rx_desc *)TMZ_VADDR(rz);
2652 * Certain constraints must be met in order to use the bulk buffer
2653 * allocation Rx burst function. If any of Rx queues doesn't meet them
2654 * the feature should be disabled for the whole port.
2656 if (check_rx_burst_bulk_alloc_preconditions(rxq)) {
2657 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Rx Bulk Alloc "
2658 "preconditions - canceling the feature for "
2659 "the whole port[%d]",
2660 rxq->queue_id, rxq->port_id);
2661 adapter->rx_bulk_alloc_allowed = false;
2665 * Allocate software ring. Allow for space at the end of the
2666 * S/W ring to make sure look-ahead logic in bulk alloc Rx burst
2667 * function does not access an invalid memory region.
2670 if (adapter->rx_bulk_alloc_allowed)
2671 len += RTE_PMD_TXGBE_RX_MAX_BURST;
2673 rxq->sw_ring = rte_zmalloc_socket("rxq->sw_ring",
2674 sizeof(struct txgbe_rx_entry) * len,
2675 RTE_CACHE_LINE_SIZE, socket_id);
2676 if (!rxq->sw_ring) {
2677 txgbe_rx_queue_release(rxq);
2682 * Always allocate even if it's not going to be needed in order to
2683 * simplify the code.
2685 * This ring is used in LRO and Scattered Rx cases and Scattered Rx may
2686 * be requested in txgbe_dev_rx_init(), which is called later from
2687 * the dev_start() flow.
2690 rte_zmalloc_socket("rxq->sw_sc_ring",
2691 sizeof(struct txgbe_scattered_rx_entry) * len,
2692 RTE_CACHE_LINE_SIZE, socket_id);
2693 if (!rxq->sw_sc_ring) {
2694 txgbe_rx_queue_release(rxq);
2698 PMD_INIT_LOG(DEBUG, "sw_ring=%p sw_sc_ring=%p hw_ring=%p "
2699 "dma_addr=0x%" PRIx64,
2700 rxq->sw_ring, rxq->sw_sc_ring, rxq->rx_ring,
2701 rxq->rx_ring_phys_addr);
2703 dev->data->rx_queues[queue_idx] = rxq;
2705 txgbe_reset_rx_queue(adapter, rxq);
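/*
 * Note on the bulk-alloc decision above: a single queue that fails
 * check_rx_burst_bulk_alloc_preconditions() (rx_free_thresh too small, not a
 * divisor of nb_rx_desc, ...) clears rx_bulk_alloc_allowed for the whole
 * port, which later steers txgbe_set_rx_function() away from the bulk-alloc
 * receive paths.
 */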
2711 txgbe_dev_rx_queue_count(void *rx_queue)
2713 #define TXGBE_RXQ_SCAN_INTERVAL 4
2714 volatile struct txgbe_rx_desc *rxdp;
2715 struct txgbe_rx_queue *rxq;
2719 rxdp = &rxq->rx_ring[rxq->rx_tail];
2721 while ((desc < rxq->nb_rx_desc) &&
2722 (rxdp->qw1.lo.status &
2723 rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))) {
2724 desc += TXGBE_RXQ_SCAN_INTERVAL;
2725 rxdp += TXGBE_RXQ_SCAN_INTERVAL;
2726 if (rxq->rx_tail + desc >= rxq->nb_rx_desc)
2727 rxdp = &(rxq->rx_ring[rxq->rx_tail +
2728 desc - rxq->nb_rx_desc]);
2735 txgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
2737 struct txgbe_rx_queue *rxq = rx_queue;
2738 volatile uint32_t *status;
2739 uint32_t nb_hold, desc;
2741 if (unlikely(offset >= rxq->nb_rx_desc))
2744 nb_hold = rxq->nb_rx_hold;
2745 if (offset >= rxq->nb_rx_desc - nb_hold)
2746 return RTE_ETH_RX_DESC_UNAVAIL;
2748 desc = rxq->rx_tail + offset;
2749 if (desc >= rxq->nb_rx_desc)
2750 desc -= rxq->nb_rx_desc;
2752 status = &rxq->rx_ring[desc].qw1.lo.status;
2753 if (*status & rte_cpu_to_le_32(TXGBE_RXD_STAT_DD))
2754 return RTE_ETH_RX_DESC_DONE;
2756 return RTE_ETH_RX_DESC_AVAIL;
2760 txgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
2762 struct txgbe_tx_queue *txq = tx_queue;
2763 volatile uint32_t *status;
2766 if (unlikely(offset >= txq->nb_tx_desc))
2769 desc = txq->tx_tail + offset;
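/* tx_tail + offset may run past the end of the ring; fold it back into
 * the [0, nb_tx_desc) range before reading the descriptor's DD bit.
 */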
2770 if (desc >= txq->nb_tx_desc) {
2771 desc -= txq->nb_tx_desc;
2772 if (desc >= txq->nb_tx_desc)
2773 desc -= txq->nb_tx_desc;
2776 status = &txq->tx_ring[desc].dw3;
2777 if (*status & rte_cpu_to_le_32(TXGBE_TXD_DD))
2778 return RTE_ETH_TX_DESC_DONE;
2780 return RTE_ETH_TX_DESC_FULL;
2784 txgbe_dev_clear_queues(struct rte_eth_dev *dev)
2787 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
2789 PMD_INIT_FUNC_TRACE();
2791 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2792 struct txgbe_tx_queue *txq = dev->data->tx_queues[i];
2795 txq->ops->release_mbufs(txq);
2796 txq->ops->reset(txq);
2800 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2801 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
2804 txgbe_rx_queue_release_mbufs(rxq);
2805 txgbe_reset_rx_queue(adapter, rxq);
2811 txgbe_dev_free_queues(struct rte_eth_dev *dev)
2815 PMD_INIT_FUNC_TRACE();
2817 for (i = 0; i < dev->data->nb_rx_queues; i++) {
2818 txgbe_dev_rx_queue_release(dev, i);
2819 dev->data->rx_queues[i] = NULL;
2821 dev->data->nb_rx_queues = 0;
2823 for (i = 0; i < dev->data->nb_tx_queues; i++) {
2824 txgbe_dev_tx_queue_release(dev, i);
2825 dev->data->tx_queues[i] = NULL;
2827 dev->data->nb_tx_queues = 0;
2831 * Receive Side Scaling (RSS)
2834 * The source and destination IP addresses of the IP header and the source
2835 * and destination ports of TCP/UDP headers, if any, of received packets are
2836 * hashed against a configurable random key to compute a 32-bit RSS hash result.
2837 * The seven (7) LSBs of the 32-bit hash result are used as an index into a
2838 * 128-entry redirection table (RETA). Each entry of the RETA provides a 3-bit
2839 * RSS output index which is used as the RX queue index where to store the
2841 * The following output is supplied in the RX write-back descriptor:
2842 * - 32-bit result of the Microsoft RSS hash function,
2843 * - 4-bit RSS type field.
2847 * Used as the default key.
2849 static uint8_t rss_intel_key[40] = {
2850 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
2851 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
2852 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
2853 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
2854 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
2858 txgbe_rss_disable(struct rte_eth_dev *dev)
2860 struct txgbe_hw *hw;
2862 hw = TXGBE_DEV_HW(dev);
2863 if (hw->mac.type == txgbe_mac_raptor_vf)
2864 wr32m(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_RSSENA, 0);
2866 wr32m(hw, TXGBE_RACTL, TXGBE_RACTL_RSSENA, 0);
2870 txgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
2871 struct rte_eth_rss_conf *rss_conf)
2873 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2880 if (!txgbe_rss_update_sp(hw->mac.type)) {
2881 PMD_DRV_LOG(ERR, "RSS hash update is not supported on this "
2886 hash_key = rss_conf->rss_key;
2888 /* Fill in RSS hash key */
2889 for (i = 0; i < 10; i++) {
2890 rss_key = LS32(hash_key[(i * 4) + 0], 0, 0xFF);
2891 rss_key |= LS32(hash_key[(i * 4) + 1], 8, 0xFF);
2892 rss_key |= LS32(hash_key[(i * 4) + 2], 16, 0xFF);
2893 rss_key |= LS32(hash_key[(i * 4) + 3], 24, 0xFF);
2894 wr32at(hw, TXGBE_REG_RSSKEY, i, rss_key);
2898 /* Set configured hashing protocols */
2899 rss_hf = rss_conf->rss_hf & TXGBE_RSS_OFFLOAD_ALL;
2900 if (hw->mac.type == txgbe_mac_raptor_vf) {
2901 mrqc = rd32(hw, TXGBE_VFPLCFG);
2902 mrqc &= ~TXGBE_VFPLCFG_RSSMASK;
2903 if (rss_hf & RTE_ETH_RSS_IPV4)
2904 mrqc |= TXGBE_VFPLCFG_RSSIPV4;
2905 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
2906 mrqc |= TXGBE_VFPLCFG_RSSIPV4TCP;
2907 if (rss_hf & RTE_ETH_RSS_IPV6 ||
2908 rss_hf & RTE_ETH_RSS_IPV6_EX)
2909 mrqc |= TXGBE_VFPLCFG_RSSIPV6;
2910 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
2911 rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
2912 mrqc |= TXGBE_VFPLCFG_RSSIPV6TCP;
2913 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
2914 mrqc |= TXGBE_VFPLCFG_RSSIPV4UDP;
2915 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
2916 rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
2917 mrqc |= TXGBE_VFPLCFG_RSSIPV6UDP;
2920 mrqc |= TXGBE_VFPLCFG_RSSENA;
2922 mrqc &= ~TXGBE_VFPLCFG_RSSENA;
2924 if (dev->data->nb_rx_queues > 3)
2925 mrqc |= TXGBE_VFPLCFG_RSSHASH(2);
2926 else if (dev->data->nb_rx_queues > 1)
2927 mrqc |= TXGBE_VFPLCFG_RSSHASH(1);
2929 wr32(hw, TXGBE_VFPLCFG, mrqc);
2931 mrqc = rd32(hw, TXGBE_RACTL);
2932 mrqc &= ~TXGBE_RACTL_RSSMASK;
2933 if (rss_hf & RTE_ETH_RSS_IPV4)
2934 mrqc |= TXGBE_RACTL_RSSIPV4;
2935 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
2936 mrqc |= TXGBE_RACTL_RSSIPV4TCP;
2937 if (rss_hf & RTE_ETH_RSS_IPV6 ||
2938 rss_hf & RTE_ETH_RSS_IPV6_EX)
2939 mrqc |= TXGBE_RACTL_RSSIPV6;
2940 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP ||
2941 rss_hf & RTE_ETH_RSS_IPV6_TCP_EX)
2942 mrqc |= TXGBE_RACTL_RSSIPV6TCP;
2943 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
2944 mrqc |= TXGBE_RACTL_RSSIPV4UDP;
2945 if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP ||
2946 rss_hf & RTE_ETH_RSS_IPV6_UDP_EX)
2947 mrqc |= TXGBE_RACTL_RSSIPV6UDP;
2950 mrqc |= TXGBE_RACTL_RSSENA;
2952 mrqc &= ~TXGBE_RACTL_RSSENA;
2954 wr32(hw, TXGBE_RACTL, mrqc);
2961 txgbe_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
2962 struct rte_eth_rss_conf *rss_conf)
2964 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2971 hash_key = rss_conf->rss_key;
2973 /* Return RSS hash key */
2974 for (i = 0; i < 10; i++) {
2975 rss_key = rd32at(hw, TXGBE_REG_RSSKEY, i);
2976 hash_key[(i * 4) + 0] = RS32(rss_key, 0, 0xFF);
2977 hash_key[(i * 4) + 1] = RS32(rss_key, 8, 0xFF);
2978 hash_key[(i * 4) + 2] = RS32(rss_key, 16, 0xFF);
2979 hash_key[(i * 4) + 3] = RS32(rss_key, 24, 0xFF);
2984 if (hw->mac.type == txgbe_mac_raptor_vf) {
2985 mrqc = rd32(hw, TXGBE_VFPLCFG);
2986 if (mrqc & TXGBE_VFPLCFG_RSSIPV4)
2987 rss_hf |= RTE_ETH_RSS_IPV4;
2988 if (mrqc & TXGBE_VFPLCFG_RSSIPV4TCP)
2989 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
2990 if (mrqc & TXGBE_VFPLCFG_RSSIPV6)
2991 rss_hf |= RTE_ETH_RSS_IPV6 |
2992 RTE_ETH_RSS_IPV6_EX;
2993 if (mrqc & TXGBE_VFPLCFG_RSSIPV6TCP)
2994 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
2995 RTE_ETH_RSS_IPV6_TCP_EX;
2996 if (mrqc & TXGBE_VFPLCFG_RSSIPV4UDP)
2997 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
2998 if (mrqc & TXGBE_VFPLCFG_RSSIPV6UDP)
2999 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
3000 RTE_ETH_RSS_IPV6_UDP_EX;
3001 if (!(mrqc & TXGBE_VFPLCFG_RSSENA))
3004 mrqc = rd32(hw, TXGBE_RACTL);
3005 if (mrqc & TXGBE_RACTL_RSSIPV4)
3006 rss_hf |= RTE_ETH_RSS_IPV4;
3007 if (mrqc & TXGBE_RACTL_RSSIPV4TCP)
3008 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
3009 if (mrqc & TXGBE_RACTL_RSSIPV6)
3010 rss_hf |= RTE_ETH_RSS_IPV6 |
3011 RTE_ETH_RSS_IPV6_EX;
3012 if (mrqc & TXGBE_RACTL_RSSIPV6TCP)
3013 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_TCP |
3014 RTE_ETH_RSS_IPV6_TCP_EX;
3015 if (mrqc & TXGBE_RACTL_RSSIPV4UDP)
3016 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
3017 if (mrqc & TXGBE_RACTL_RSSIPV6UDP)
3018 rss_hf |= RTE_ETH_RSS_NONFRAG_IPV6_UDP |
3019 RTE_ETH_RSS_IPV6_UDP_EX;
3020 if (!(mrqc & TXGBE_RACTL_RSSENA))
3024 rss_hf &= TXGBE_RSS_OFFLOAD_ALL;
3026 rss_conf->rss_hf = rss_hf;
3031 txgbe_rss_configure(struct rte_eth_dev *dev)
3033 struct rte_eth_rss_conf rss_conf;
3034 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
3035 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3040 PMD_INIT_FUNC_TRACE();
3043 * Fill in redirection table
3044 * The byte-swap is needed because NIC registers are in
3045 * little-endian order.
3047 if (adapter->rss_reta_updated == 0) {
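/* Spread the 128 RETA entries round-robin over the configured Rx queues.
 * Each 32-bit RSSTBL register packs four one-byte queue indexes (LS32
 * shifts the new index into the top byte), so the accumulated value is
 * written once per group of four entries (i >> 2 selects the register).
 */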
3049 for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
3050 if (j == dev->data->nb_rx_queues)
3052 reta = (reta >> 8) | LS32(j, 24, 0xFF);
3054 wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
3058 * Configure the RSS key and the RSS protocols used to compute
3059 * the RSS hash of input packets.
3061 rss_conf = dev->data->dev_conf.rx_adv_conf.rss_conf;
3062 if (rss_conf.rss_key == NULL)
3063 rss_conf.rss_key = rss_intel_key; /* Default hash key */
3064 txgbe_dev_rss_hash_update(dev, &rss_conf);
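/*
 * From the application side this is normally driven through
 * rte_eth_conf.rx_adv_conf.rss_conf at configure time, or changed at runtime
 * with rte_eth_dev_rss_hash_update(). A hypothetical runtime-update sketch
 * (port 0 assumed):
 *
 *     struct rte_eth_rss_conf conf = {
 *         .rss_key = NULL,                    // keep the current key
 *         .rss_hf  = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *     };
 *     rte_eth_dev_rss_hash_update(0, &conf);
 */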
3067 #define NUM_VFTA_REGISTERS 128
3068 #define NIC_RX_BUFFER_SIZE 0x200
3071 txgbe_vmdq_dcb_configure(struct rte_eth_dev *dev)
3073 struct rte_eth_vmdq_dcb_conf *cfg;
3074 struct txgbe_hw *hw;
3075 enum rte_eth_nb_pools num_pools;
3076 uint32_t mrqc, vt_ctl, queue_mapping, vlanctrl;
3078 uint8_t nb_tcs; /* number of traffic classes */
3081 PMD_INIT_FUNC_TRACE();
3082 hw = TXGBE_DEV_HW(dev);
3083 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3084 num_pools = cfg->nb_queue_pools;
3085 /* Check we have a valid number of pools */
3086 if (num_pools != RTE_ETH_16_POOLS && num_pools != RTE_ETH_32_POOLS) {
3087 txgbe_rss_disable(dev);
3090 /* 16 pools -> 8 traffic classes, 32 pools -> 4 traffic classes */
3091 nb_tcs = (uint8_t)(RTE_ETH_VMDQ_DCB_NUM_QUEUES / (int)num_pools);
3094 * split rx buffer up into sections, each for 1 traffic class
3096 pbsize = (uint16_t)(NIC_RX_BUFFER_SIZE / nb_tcs);
3097 for (i = 0; i < nb_tcs; i++) {
3098 uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
3100 rxpbsize &= (~(0x3FF << 10)); /* clear the 10-bit size field */
3102 rxpbsize |= (pbsize << 10); /* set the new size value */
3103 wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3105 /* zero alloc all unused TCs */
3106 for (i = nb_tcs; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3107 uint32_t rxpbsize = rd32(hw, TXGBE_PBRXSIZE(i));
3109 rxpbsize &= (~(0x3FF << 10)); /* clear the 10-bit size field */
3111 wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3114 if (num_pools == RTE_ETH_16_POOLS) {
3115 mrqc = TXGBE_PORTCTL_NUMTC_8;
3116 mrqc |= TXGBE_PORTCTL_NUMVT_16;
3118 mrqc = TXGBE_PORTCTL_NUMTC_4;
3119 mrqc |= TXGBE_PORTCTL_NUMVT_32;
3121 wr32m(hw, TXGBE_PORTCTL,
3122 TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK, mrqc);
3124 vt_ctl = TXGBE_POOLCTL_RPLEN;
3125 if (cfg->enable_default_pool)
3126 vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
3128 vt_ctl |= TXGBE_POOLCTL_DEFDSA;
3130 wr32(hw, TXGBE_POOLCTL, vt_ctl);
3133 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
3135 * mapping is done with 3 bits per priority,
3136 * so shift by i*3 each time
3138 queue_mapping |= ((cfg->dcb_tc[i] & 0x07) << (i * 3));
3140 wr32(hw, TXGBE_RPUP2TC, queue_mapping);
3142 wr32(hw, TXGBE_ARBRXCTL, TXGBE_ARBRXCTL_RRM);
3144 /* enable vlan filtering and allow all vlan tags through */
3145 vlanctrl = rd32(hw, TXGBE_VLANCTL);
3146 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3147 wr32(hw, TXGBE_VLANCTL, vlanctrl);
3149 /* enable all vlan filters */
3150 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3151 wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
3153 wr32(hw, TXGBE_POOLRXENA(0),
3154 num_pools == RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3156 wr32(hw, TXGBE_ETHADDRIDX, 0);
3157 wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
3158 wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
3160 /* set up filters for vlan tags as configured */
3161 for (i = 0; i < cfg->nb_pool_maps; i++) {
3162 /* set vlan id in VF register and set the valid bit */
3163 wr32(hw, TXGBE_PSRVLANIDX, i);
3164 wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
3165 (cfg->pool_map[i].vlan_id & 0xFFF)));
3167 wr32(hw, TXGBE_PSRVLANPLM(0), cfg->pool_map[i].pools);
3172 * txgbe_dcb_config_tx_hw_config - Configure general DCB TX parameters
3173 * @dev: pointer to eth_dev structure
3174 * @dcb_config: pointer to txgbe_dcb_config structure
3177 txgbe_dcb_tx_hw_config(struct rte_eth_dev *dev,
3178 struct txgbe_dcb_config *dcb_config)
3181 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3183 PMD_INIT_FUNC_TRACE();
3185 /* Disable the Tx desc arbiter */
3186 reg = rd32(hw, TXGBE_ARBTXCTL);
3187 reg |= TXGBE_ARBTXCTL_DIA;
3188 wr32(hw, TXGBE_ARBTXCTL, reg);
3190 /* Enable DCB for Tx with 8 TCs */
3191 reg = rd32(hw, TXGBE_PORTCTL);
3192 reg &= TXGBE_PORTCTL_NUMTC_MASK;
3193 reg |= TXGBE_PORTCTL_DCB;
3194 if (dcb_config->num_tcs.pg_tcs == 8)
3195 reg |= TXGBE_PORTCTL_NUMTC_8;
3197 reg |= TXGBE_PORTCTL_NUMTC_4;
3199 wr32(hw, TXGBE_PORTCTL, reg);
3201 /* Enable the Tx desc arbiter */
3202 reg = rd32(hw, TXGBE_ARBTXCTL);
3203 reg &= ~TXGBE_ARBTXCTL_DIA;
3204 wr32(hw, TXGBE_ARBTXCTL, reg);
3208 * txgbe_vmdq_dcb_hw_tx_config - Configure general VMDQ+DCB TX parameters
3209 * @dev: pointer to rte_eth_dev structure
3210 * @dcb_config: pointer to txgbe_dcb_config structure
3213 txgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev,
3214 struct txgbe_dcb_config *dcb_config)
3216 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3217 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3218 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3220 PMD_INIT_FUNC_TRACE();
3221 /*PF VF Transmit Enable*/
3222 wr32(hw, TXGBE_POOLTXENA(0),
3223 vmdq_tx_conf->nb_queue_pools ==
3224 RTE_ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF);
3226 /*Configure general DCB TX parameters*/
3227 txgbe_dcb_tx_hw_config(dev, dcb_config);
3231 txgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev,
3232 struct txgbe_dcb_config *dcb_config)
3234 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
3235 &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
3236 struct txgbe_dcb_tc_config *tc;
3239 /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
3240 if (vmdq_rx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
3241 dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
3242 dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
3244 dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
3245 dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
3248 /* Initialize User Priority to Traffic Class mapping */
3249 for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3250 tc = &dcb_config->tc_config[j];
3251 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3254 /* User Priority to Traffic Class mapping */
3255 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3256 j = vmdq_rx_conf->dcb_tc[i];
3257 tc = &dcb_config->tc_config[j];
3258 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3264 txgbe_dcb_vt_tx_config(struct rte_eth_dev *dev,
3265 struct txgbe_dcb_config *dcb_config)
3267 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
3268 &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf;
3269 struct txgbe_dcb_tc_config *tc;
3272 /* convert rte_eth_conf.rx_adv_conf to struct txgbe_dcb_config */
3273 if (vmdq_tx_conf->nb_queue_pools == RTE_ETH_16_POOLS) {
3274 dcb_config->num_tcs.pg_tcs = RTE_ETH_8_TCS;
3275 dcb_config->num_tcs.pfc_tcs = RTE_ETH_8_TCS;
3277 dcb_config->num_tcs.pg_tcs = RTE_ETH_4_TCS;
3278 dcb_config->num_tcs.pfc_tcs = RTE_ETH_4_TCS;
3281 /* Initialize User Priority to Traffic Class mapping */
3282 for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3283 tc = &dcb_config->tc_config[j];
3284 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3287 /* User Priority to Traffic Class mapping */
3288 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3289 j = vmdq_tx_conf->dcb_tc[i];
3290 tc = &dcb_config->tc_config[j];
3291 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3297 txgbe_dcb_rx_config(struct rte_eth_dev *dev,
3298 struct txgbe_dcb_config *dcb_config)
3300 struct rte_eth_dcb_rx_conf *rx_conf =
3301 &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf;
3302 struct txgbe_dcb_tc_config *tc;
3305 dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs;
3306 dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs;
3308 /* Initialize User Priority to Traffic Class mapping */
3309 for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3310 tc = &dcb_config->tc_config[j];
3311 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0;
3314 /* User Priority to Traffic Class mapping */
3315 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3316 j = rx_conf->dcb_tc[i];
3317 tc = &dcb_config->tc_config[j];
3318 tc->path[TXGBE_DCB_RX_CONFIG].up_to_tc_bitmap |=
3324 txgbe_dcb_tx_config(struct rte_eth_dev *dev,
3325 struct txgbe_dcb_config *dcb_config)
3327 struct rte_eth_dcb_tx_conf *tx_conf =
3328 &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf;
3329 struct txgbe_dcb_tc_config *tc;
3332 dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs;
3333 dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs;
3335 /* Initialize User Priority to Traffic Class mapping */
3336 for (j = 0; j < TXGBE_DCB_TC_MAX; j++) {
3337 tc = &dcb_config->tc_config[j];
3338 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0;
3341 /* User Priority to Traffic Class mapping */
3342 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3343 j = tx_conf->dcb_tc[i];
3344 tc = &dcb_config->tc_config[j];
3345 tc->path[TXGBE_DCB_TX_CONFIG].up_to_tc_bitmap |=
3351 * txgbe_dcb_rx_hw_config - Configure general DCB RX HW parameters
3352 * @dev: pointer to eth_dev structure
3353 * @dcb_config: pointer to txgbe_dcb_config structure
3356 txgbe_dcb_rx_hw_config(struct rte_eth_dev *dev,
3357 struct txgbe_dcb_config *dcb_config)
3363 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3365 PMD_INIT_FUNC_TRACE();
3367 * Disable the arbiter before changing parameters
3368 * (always enable recycle mode; WSP)
3370 reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP | TXGBE_ARBRXCTL_DIA;
3371 wr32(hw, TXGBE_ARBRXCTL, reg);
3373 reg = rd32(hw, TXGBE_PORTCTL);
3374 reg &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3375 if (dcb_config->num_tcs.pg_tcs == 4) {
3376 reg |= TXGBE_PORTCTL_NUMTC_4;
3377 if (dcb_config->vt_mode)
3378 reg |= TXGBE_PORTCTL_NUMVT_32;
3380 wr32(hw, TXGBE_POOLCTL, 0);
3383 if (dcb_config->num_tcs.pg_tcs == 8) {
3384 reg |= TXGBE_PORTCTL_NUMTC_8;
3385 if (dcb_config->vt_mode)
3386 reg |= TXGBE_PORTCTL_NUMVT_16;
3388 wr32(hw, TXGBE_POOLCTL, 0);
3391 wr32(hw, TXGBE_PORTCTL, reg);
3393 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3394 /* Disable drop for all queues in VMDQ mode*/
3395 for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
3396 u32 val = 1 << (q % 32);
3397 wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3400 /* Enable drop for all queues in SRIOV mode */
3401 for (q = 0; q < TXGBE_MAX_RX_QUEUE_NUM; q++) {
3402 u32 val = 1 << (q % 32);
3403 wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3407 /* VLNCTL: enable vlan filtering and allow all vlan tags through */
3408 vlanctrl = rd32(hw, TXGBE_VLANCTL);
3409 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3410 wr32(hw, TXGBE_VLANCTL, vlanctrl);
3412 /* VLANTBL - enable all vlan filters */
3413 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3414 wr32(hw, TXGBE_VLANTBL(i), 0xFFFFFFFF);
3417 * Configure Rx packet plane (recycle mode; WSP) and
3420 reg = TXGBE_ARBRXCTL_RRM | TXGBE_ARBRXCTL_WSP;
3421 wr32(hw, TXGBE_ARBRXCTL, reg);
3425 txgbe_dcb_hw_arbite_rx_config(struct txgbe_hw *hw, uint16_t *refill,
3426 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3428 txgbe_dcb_config_rx_arbiter_raptor(hw, refill, max, bwg_id,
3433 txgbe_dcb_hw_arbite_tx_config(struct txgbe_hw *hw, uint16_t *refill,
3434 uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map)
3436 switch (hw->mac.type) {
3437 case txgbe_mac_raptor:
3438 txgbe_dcb_config_tx_desc_arbiter_raptor(hw, refill,
3440 txgbe_dcb_config_tx_data_arbiter_raptor(hw, refill,
3441 max, bwg_id, tsa, map);
3448 #define DCB_RX_CONFIG 1
3449 #define DCB_TX_CONFIG 1
3450 #define DCB_TX_PB 1024
3452 * txgbe_dcb_hw_configure - Enable DCB and configure
3453 * general DCB in VT mode and non-VT mode parameters
3454 * @dev: pointer to rte_eth_dev structure
3455 * @dcb_config: pointer to txgbe_dcb_config structure
3458 txgbe_dcb_hw_configure(struct rte_eth_dev *dev,
3459 struct txgbe_dcb_config *dcb_config)
3462 uint8_t i, pfc_en, nb_tcs;
3463 uint16_t pbsize, rx_buffer_size;
3464 uint8_t config_dcb_rx = 0;
3465 uint8_t config_dcb_tx = 0;
3466 uint8_t tsa[TXGBE_DCB_TC_MAX] = {0};
3467 uint8_t bwgid[TXGBE_DCB_TC_MAX] = {0};
3468 uint16_t refill[TXGBE_DCB_TC_MAX] = {0};
3469 uint16_t max[TXGBE_DCB_TC_MAX] = {0};
3470 uint8_t map[TXGBE_DCB_TC_MAX] = {0};
3471 struct txgbe_dcb_tc_config *tc;
3472 uint32_t max_frame = dev->data->mtu +
3473 RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
3474 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3475 struct txgbe_bw_conf *bw_conf = TXGBE_DEV_BW_CONF(dev);
3477 switch (dev->data->dev_conf.rxmode.mq_mode) {
3478 case RTE_ETH_MQ_RX_VMDQ_DCB:
3479 dcb_config->vt_mode = true;
3480 config_dcb_rx = DCB_RX_CONFIG;
3482 * get dcb and VT rx configuration parameters
3485 txgbe_vmdq_dcb_rx_config(dev, dcb_config);
3486 /*Configure general VMDQ and DCB RX parameters*/
3487 txgbe_vmdq_dcb_configure(dev);
3489 case RTE_ETH_MQ_RX_DCB:
3490 case RTE_ETH_MQ_RX_DCB_RSS:
3491 dcb_config->vt_mode = false;
3492 config_dcb_rx = DCB_RX_CONFIG;
3493 /* Get dcb TX configuration parameters from rte_eth_conf */
3494 txgbe_dcb_rx_config(dev, dcb_config);
3495 /*Configure general DCB RX parameters*/
3496 txgbe_dcb_rx_hw_config(dev, dcb_config);
3499 PMD_INIT_LOG(ERR, "Incorrect DCB RX mode configuration");
3502 switch (dev->data->dev_conf.txmode.mq_mode) {
3503 case RTE_ETH_MQ_TX_VMDQ_DCB:
3504 dcb_config->vt_mode = true;
3505 config_dcb_tx = DCB_TX_CONFIG;
3506 /* get DCB and VT TX configuration parameters
3509 txgbe_dcb_vt_tx_config(dev, dcb_config);
3510 /* Configure general VMDQ and DCB TX parameters */
3511 txgbe_vmdq_dcb_hw_tx_config(dev, dcb_config);
3514 case RTE_ETH_MQ_TX_DCB:
3515 dcb_config->vt_mode = false;
3516 config_dcb_tx = DCB_TX_CONFIG;
3517 /* get DCB TX configuration parameters from rte_eth_conf */
3518 txgbe_dcb_tx_config(dev, dcb_config);
3519 /* Configure general DCB TX parameters */
3520 txgbe_dcb_tx_hw_config(dev, dcb_config);
3523 PMD_INIT_LOG(ERR, "Incorrect DCB TX mode configuration");
3527 nb_tcs = dcb_config->num_tcs.pfc_tcs;
3529 txgbe_dcb_unpack_map_cee(dcb_config, TXGBE_DCB_RX_CONFIG, map);
3530 if (nb_tcs == RTE_ETH_4_TCS) {
3531 /* Avoid un-configured priority mapping to TC0 */
3533 uint8_t mask = 0xFF;
3535 for (i = 0; i < RTE_ETH_DCB_NUM_USER_PRIORITIES - 4; i++)
3536 mask = (uint8_t)(mask & (~(1 << map[i])));
3537 for (i = 0; mask && (i < TXGBE_DCB_TC_MAX); i++) {
3538 if ((mask & 0x1) && j < RTE_ETH_DCB_NUM_USER_PRIORITIES)
3542 /* Re-configure 4 TCs BW */
3543 for (i = 0; i < nb_tcs; i++) {
3544 tc = &dcb_config->tc_config[i];
3545 if (bw_conf->tc_num != nb_tcs)
3546 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
3547 (uint8_t)(100 / nb_tcs);
3548 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
3549 (uint8_t)(100 / nb_tcs);
3551 for (; i < TXGBE_DCB_TC_MAX; i++) {
3552 tc = &dcb_config->tc_config[i];
3553 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent = 0;
3554 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent = 0;
3557 /* Re-configure 8 TCs BW */
3558 for (i = 0; i < nb_tcs; i++) {
3559 tc = &dcb_config->tc_config[i];
3560 if (bw_conf->tc_num != nb_tcs)
3561 tc->path[TXGBE_DCB_TX_CONFIG].bwg_percent =
3562 (uint8_t)(100 / nb_tcs + (i & 1));
3563 tc->path[TXGBE_DCB_RX_CONFIG].bwg_percent =
3564 (uint8_t)(100 / nb_tcs + (i & 1));
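/* With 8 TCs the integer split is 100 / 8 = 12, and the (i & 1) term adds
 * 1 to every odd TC, so 4 * 12 + 4 * 13 = 100 and the bandwidth group
 * percentages still sum to exactly 100.
 */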
3568 rx_buffer_size = NIC_RX_BUFFER_SIZE;
3570 if (config_dcb_rx) {
3571 /* Set RX buffer size */
3572 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3573 uint32_t rxpbsize = pbsize << 10;
3575 for (i = 0; i < nb_tcs; i++)
3576 wr32(hw, TXGBE_PBRXSIZE(i), rxpbsize);
3578 /* zero alloc all unused TCs */
3579 for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++)
3580 wr32(hw, TXGBE_PBRXSIZE(i), 0);
3582 if (config_dcb_tx) {
3583 /* Only support an equally distributed
3584 * Tx packet buffer strategy.
3586 uint32_t txpktsize = TXGBE_PBTXSIZE_MAX / nb_tcs;
3587 uint32_t txpbthresh = (txpktsize / DCB_TX_PB) -
3588 TXGBE_TXPKT_SIZE_MAX;
3590 for (i = 0; i < nb_tcs; i++) {
3591 wr32(hw, TXGBE_PBTXSIZE(i), txpktsize);
3592 wr32(hw, TXGBE_PBTXDMATH(i), txpbthresh);
3594 /* Clear unused TCs, if any, to zero buffer size*/
3595 for (; i < RTE_ETH_DCB_NUM_USER_PRIORITIES; i++) {
3596 wr32(hw, TXGBE_PBTXSIZE(i), 0);
3597 wr32(hw, TXGBE_PBTXDMATH(i), 0);
3601 /*Calculates traffic class credits*/
3602 txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3603 TXGBE_DCB_TX_CONFIG);
3604 txgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame,
3605 TXGBE_DCB_RX_CONFIG);
3607 if (config_dcb_rx) {
3608 /* Unpack CEE standard containers */
3609 txgbe_dcb_unpack_refill_cee(dcb_config,
3610 TXGBE_DCB_RX_CONFIG, refill);
3611 txgbe_dcb_unpack_max_cee(dcb_config, max);
3612 txgbe_dcb_unpack_bwgid_cee(dcb_config,
3613 TXGBE_DCB_RX_CONFIG, bwgid);
3614 txgbe_dcb_unpack_tsa_cee(dcb_config,
3615 TXGBE_DCB_RX_CONFIG, tsa);
3616 /* Configure PG(ETS) RX */
3617 txgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map);
3620 if (config_dcb_tx) {
3621 /* Unpack CEE standard containers */
3622 txgbe_dcb_unpack_refill_cee(dcb_config,
3623 TXGBE_DCB_TX_CONFIG, refill);
3624 txgbe_dcb_unpack_max_cee(dcb_config, max);
3625 txgbe_dcb_unpack_bwgid_cee(dcb_config,
3626 TXGBE_DCB_TX_CONFIG, bwgid);
3627 txgbe_dcb_unpack_tsa_cee(dcb_config,
3628 TXGBE_DCB_TX_CONFIG, tsa);
3629 /* Configure PG(ETS) TX */
3630 txgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map);
3633 /* Configure queue statistics registers */
3634 txgbe_dcb_config_tc_stats_raptor(hw, dcb_config);
3636 /* Check if the PFC is supported */
3637 if (dev->data->dev_conf.dcb_capability_en & RTE_ETH_DCB_PFC_SUPPORT) {
3638 pbsize = (uint16_t)(rx_buffer_size / nb_tcs);
3639 for (i = 0; i < nb_tcs; i++) {
3640 /* high_water is 3/4 and low_water is 1/4 of the per-TC buffer;
3641 * e.g. with 8 TCs (pbsize = 512 / 8 = 64) this gives the default
3642 * high_water of 48 and low_water of 16.
3644 hw->fc.high_water[i] = (pbsize * 3) / 4;
3645 hw->fc.low_water[i] = pbsize / 4;
3646 /* Enable pfc for this TC */
3647 tc = &dcb_config->tc_config[i];
3648 tc->pfc = txgbe_dcb_pfc_enabled;
3650 txgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en);
3651 if (dcb_config->num_tcs.pfc_tcs == RTE_ETH_4_TCS)
3653 ret = txgbe_dcb_config_pfc(hw, pfc_en, map);
3659 void txgbe_configure_pb(struct rte_eth_dev *dev)
3661 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3662 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3665 int tc = dev_conf->rx_adv_conf.dcb_rx_conf.nb_tcs;
3667 /* Reserve 256KB(/512KB) rx buffer for fdir */
3670 hw->mac.setup_pba(hw, tc, hdrm, PBA_STRATEGY_EQUAL);
3673 void txgbe_configure_port(struct rte_eth_dev *dev)
3675 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3677 uint16_t tpids[8] = {RTE_ETHER_TYPE_VLAN, RTE_ETHER_TYPE_QINQ,
3682 PMD_INIT_FUNC_TRACE();
3684 /* default outer vlan tpid */
3685 wr32(hw, TXGBE_EXTAG,
3686 TXGBE_EXTAG_ETAG(RTE_ETHER_TYPE_ETAG) |
3687 TXGBE_EXTAG_VLAN(RTE_ETHER_TYPE_QINQ));
3689 /* default inner vlan tpid */
3690 wr32m(hw, TXGBE_VLANCTL,
3691 TXGBE_VLANCTL_TPID_MASK,
3692 TXGBE_VLANCTL_TPID(RTE_ETHER_TYPE_VLAN));
3693 wr32m(hw, TXGBE_DMATXCTRL,
3694 TXGBE_DMATXCTRL_TPID_MASK,
3695 TXGBE_DMATXCTRL_TPID(RTE_ETHER_TYPE_VLAN));
3697 /* default vlan tpid filters */
3698 for (i = 0; i < 8; i++) {
3699 wr32m(hw, TXGBE_TAGTPID(i / 2),
3700 (i % 2 ? TXGBE_TAGTPID_MSB_MASK
3701 : TXGBE_TAGTPID_LSB_MASK),
3702 (i % 2 ? TXGBE_TAGTPID_MSB(tpids[i])
3703 : TXGBE_TAGTPID_LSB(tpids[i])));
3706 /* default vxlan port */
3707 wr32(hw, TXGBE_VXLANPORT, 4789);
3711 * txgbe_configure_dcb - Configure DCB Hardware
3712 * @dev: pointer to rte_eth_dev
3714 void txgbe_configure_dcb(struct rte_eth_dev *dev)
3716 struct txgbe_dcb_config *dcb_cfg = TXGBE_DEV_DCB_CONFIG(dev);
3717 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
3719 PMD_INIT_FUNC_TRACE();
3721 /* check support mq_mode for DCB */
3722 if (dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_VMDQ_DCB &&
3723 dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB &&
3724 dev_conf->rxmode.mq_mode != RTE_ETH_MQ_RX_DCB_RSS)
3727 if (dev->data->nb_rx_queues > RTE_ETH_DCB_NUM_QUEUES)
3730 /** Configure DCB hardware **/
3731 txgbe_dcb_hw_configure(dev, dcb_cfg);
3735 * VMDq is only supported on 10 GbE NICs.
3738 txgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev)
3740 struct rte_eth_vmdq_rx_conf *cfg;
3741 struct txgbe_hw *hw;
3742 enum rte_eth_nb_pools num_pools;
3743 uint32_t mrqc, vt_ctl, vlanctrl;
3747 PMD_INIT_FUNC_TRACE();
3748 hw = TXGBE_DEV_HW(dev);
3749 cfg = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
3750 num_pools = cfg->nb_queue_pools;
3752 txgbe_rss_disable(dev);
3755 mrqc = TXGBE_PORTCTL_NUMVT_64;
3756 wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mrqc);
3758 /* turn on virtualisation and set the default pool */
3759 vt_ctl = TXGBE_POOLCTL_RPLEN;
3760 if (cfg->enable_default_pool)
3761 vt_ctl |= TXGBE_POOLCTL_DEFPL(cfg->default_pool);
3763 vt_ctl |= TXGBE_POOLCTL_DEFDSA;
3765 wr32(hw, TXGBE_POOLCTL, vt_ctl);
3767 for (i = 0; i < (int)num_pools; i++) {
3768 vmolr = txgbe_convert_vm_rx_mask_to_val(cfg->rx_mode, vmolr);
3769 wr32(hw, TXGBE_POOLETHCTL(i), vmolr);
3772 /* enable vlan filtering and allow all vlan tags through */
3773 vlanctrl = rd32(hw, TXGBE_VLANCTL);
3774 vlanctrl |= TXGBE_VLANCTL_VFE; /* enable vlan filters */
3775 wr32(hw, TXGBE_VLANCTL, vlanctrl);
3777 /* enable all vlan filters */
3778 for (i = 0; i < NUM_VFTA_REGISTERS; i++)
3779 wr32(hw, TXGBE_VLANTBL(i), UINT32_MAX);
3781 /* pool enabling for receive - 64 */
3782 wr32(hw, TXGBE_POOLRXENA(0), UINT32_MAX);
3783 if (num_pools == RTE_ETH_64_POOLS)
3784 wr32(hw, TXGBE_POOLRXENA(1), UINT32_MAX);
3787 * allow pools to read specific mac addresses
3788 * In this case, all pools should be able to read from mac addr 0
3790 wr32(hw, TXGBE_ETHADDRIDX, 0);
3791 wr32(hw, TXGBE_ETHADDRASSL, 0xFFFFFFFF);
3792 wr32(hw, TXGBE_ETHADDRASSH, 0xFFFFFFFF);
3794 /* set up filters for vlan tags as configured */
3795 for (i = 0; i < cfg->nb_pool_maps; i++) {
3796 /* set vlan id in VF register and set the valid bit */
3797 wr32(hw, TXGBE_PSRVLANIDX, i);
3798 wr32(hw, TXGBE_PSRVLAN, (TXGBE_PSRVLAN_EA |
3799 TXGBE_PSRVLAN_VID(cfg->pool_map[i].vlan_id)));
3801 * Put the allowed pools in VFB reg. As we only have 16 or 64
3802 * pools, we only need to use the first half of the register
3805 if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0)
3806 wr32(hw, TXGBE_PSRVLANPLM(0),
3807 (cfg->pool_map[i].pools & UINT32_MAX));
3809 wr32(hw, TXGBE_PSRVLANPLM(1),
3810 ((cfg->pool_map[i].pools >> 32) & UINT32_MAX));
3813 /* Tx general switch control: enable VMDq loopback if requested */
3814 if (cfg->enable_loop_back) {
3815 wr32(hw, TXGBE_PSRCTL, TXGBE_PSRCTL_LBENA);
3816 for (i = 0; i < 64; i++)
3817 wr32m(hw, TXGBE_POOLETHCTL(i),
3818 TXGBE_POOLETHCTL_LLB, TXGBE_POOLETHCTL_LLB);
3825 * txgbe_vmdq_tx_hw_configure - Configure general VMDq TX parameters
3826 * @hw: pointer to hardware structure
3829 txgbe_vmdq_tx_hw_configure(struct txgbe_hw *hw)
3834 PMD_INIT_FUNC_TRACE();
3835 /*PF VF Transmit Enable*/
3836 wr32(hw, TXGBE_POOLTXENA(0), UINT32_MAX);
3837 wr32(hw, TXGBE_POOLTXENA(1), UINT32_MAX);
3839 /* Disable the Tx desc arbiter */
3840 reg = rd32(hw, TXGBE_ARBTXCTL);
3841 reg |= TXGBE_ARBTXCTL_DIA;
3842 wr32(hw, TXGBE_ARBTXCTL, reg);
3844 wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK,
3845 TXGBE_PORTCTL_NUMVT_64);
3847 /* Disable drop for all queues */
3848 for (q = 0; q < 128; q++) {
3849 u32 val = 1 << (q % 32);
3850 wr32m(hw, TXGBE_QPRXDROP(q / 32), val, val);
3853 /* Enable the Tx desc arbiter */
3854 reg = rd32(hw, TXGBE_ARBTXCTL);
3855 reg &= ~TXGBE_ARBTXCTL_DIA;
3856 wr32(hw, TXGBE_ARBTXCTL, reg);
3861 static int __rte_cold
3862 txgbe_alloc_rx_queue_mbufs(struct txgbe_rx_queue *rxq)
3864 struct txgbe_rx_entry *rxe = rxq->sw_ring;
3868 /* Initialize software ring entries */
3869 for (i = 0; i < rxq->nb_rx_desc; i++) {
3870 volatile struct txgbe_rx_desc *rxd;
3871 struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
3874 PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
3875 (unsigned int)rxq->queue_id);
3879 mbuf->data_off = RTE_PKTMBUF_HEADROOM;
3880 mbuf->port = rxq->port_id;
3883 rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
3884 rxd = &rxq->rx_ring[i];
3885 TXGBE_RXD_HDRADDR(rxd, 0);
3886 TXGBE_RXD_PKTADDR(rxd, dma_addr);
3894 txgbe_config_vf_rss(struct rte_eth_dev *dev)
3896 struct txgbe_hw *hw;
3899 txgbe_rss_configure(dev);
3901 hw = TXGBE_DEV_HW(dev);
3904 mrqc = rd32(hw, TXGBE_PORTCTL);
3905 mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3906 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3907 case RTE_ETH_64_POOLS:
3908 mrqc |= TXGBE_PORTCTL_NUMVT_64;
3911 case RTE_ETH_32_POOLS:
3912 mrqc |= TXGBE_PORTCTL_NUMVT_32;
3916 PMD_INIT_LOG(ERR, "Invalid pool number in IOV mode with VMDQ RSS");
3920 wr32(hw, TXGBE_PORTCTL, mrqc);
3926 txgbe_config_vf_default(struct rte_eth_dev *dev)
3928 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
3931 mrqc = rd32(hw, TXGBE_PORTCTL);
3932 mrqc &= ~(TXGBE_PORTCTL_NUMTC_MASK | TXGBE_PORTCTL_NUMVT_MASK);
3933 switch (RTE_ETH_DEV_SRIOV(dev).active) {
3934 case RTE_ETH_64_POOLS:
3935 mrqc |= TXGBE_PORTCTL_NUMVT_64;
3938 case RTE_ETH_32_POOLS:
3939 mrqc |= TXGBE_PORTCTL_NUMVT_32;
3942 case RTE_ETH_16_POOLS:
3943 mrqc |= TXGBE_PORTCTL_NUMVT_16;
3947 "invalid pool number in IOV mode");
3951 wr32(hw, TXGBE_PORTCTL, mrqc);
3957 txgbe_dev_mq_rx_configure(struct rte_eth_dev *dev)
3959 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
3961 * SRIOV inactive scheme
3962 * any DCB/RSS w/o VMDq multi-queue setting
3964 switch (dev->data->dev_conf.rxmode.mq_mode) {
3965 case RTE_ETH_MQ_RX_RSS:
3966 case RTE_ETH_MQ_RX_DCB_RSS:
3967 case RTE_ETH_MQ_RX_VMDQ_RSS:
3968 txgbe_rss_configure(dev);
3971 case RTE_ETH_MQ_RX_VMDQ_DCB:
3972 txgbe_vmdq_dcb_configure(dev);
3975 case RTE_ETH_MQ_RX_VMDQ_ONLY:
3976 txgbe_vmdq_rx_hw_configure(dev);
3979 case RTE_ETH_MQ_RX_NONE:
3981 /* if mq_mode is none, disable RSS mode. */
3982 txgbe_rss_disable(dev);
3986 /* SRIOV active scheme
3987 * Support RSS together with SRIOV.
3989 switch (dev->data->dev_conf.rxmode.mq_mode) {
3990 case RTE_ETH_MQ_RX_RSS:
3991 case RTE_ETH_MQ_RX_VMDQ_RSS:
3992 txgbe_config_vf_rss(dev);
3994 case RTE_ETH_MQ_RX_VMDQ_DCB:
3995 case RTE_ETH_MQ_RX_DCB:
3996 /* In SRIOV, the configuration is the same as VMDq case */
3997 txgbe_vmdq_dcb_configure(dev);
3999 /* DCB/RSS together with SRIOV is not supported */
4000 case RTE_ETH_MQ_RX_VMDQ_DCB_RSS:
4001 case RTE_ETH_MQ_RX_DCB_RSS:
4003 "Could not support DCB/RSS with VMDq & SRIOV");
4006 txgbe_config_vf_default(dev);
4015 txgbe_dev_mq_tx_configure(struct rte_eth_dev *dev)
4017 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4021 /* disable arbiter */
4022 rttdcs = rd32(hw, TXGBE_ARBTXCTL);
4023 rttdcs |= TXGBE_ARBTXCTL_DIA;
4024 wr32(hw, TXGBE_ARBTXCTL, rttdcs);
4026 if (RTE_ETH_DEV_SRIOV(dev).active == 0) {
4028 * SRIOV inactive scheme
4029 * any DCB w/o VMDq multi-queue setting
4031 if (dev->data->dev_conf.txmode.mq_mode == RTE_ETH_MQ_TX_VMDQ_ONLY)
4032 txgbe_vmdq_tx_hw_configure(hw);
4034 wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, 0);
4036 switch (RTE_ETH_DEV_SRIOV(dev).active) {
4038 * SRIOV active scheme
4039 * FIXME if support DCB together with VMDq & SRIOV
4041 case RTE_ETH_64_POOLS:
4042 mtqc = TXGBE_PORTCTL_NUMVT_64;
4044 case RTE_ETH_32_POOLS:
4045 mtqc = TXGBE_PORTCTL_NUMVT_32;
4047 case RTE_ETH_16_POOLS:
4048 mtqc = TXGBE_PORTCTL_NUMVT_16;
4052 PMD_INIT_LOG(ERR, "invalid pool number in IOV mode");
4054 wr32m(hw, TXGBE_PORTCTL, TXGBE_PORTCTL_NUMVT_MASK, mtqc);
4057 /* re-enable arbiter */
4058 rttdcs &= ~TXGBE_ARBTXCTL_DIA;
4059 wr32(hw, TXGBE_ARBTXCTL, rttdcs);
4065 * txgbe_get_rscctl_maxdesc
4067 * @pool Memory pool of the Rx queue
4069 static inline uint32_t
4070 txgbe_get_rscctl_maxdesc(struct rte_mempool *pool)
4072 struct rte_pktmbuf_pool_private *mp_priv = rte_mempool_get_priv(pool);
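/* Work out how many receive buffers a maximum-size (64 KB) aggregation can
 * span given this pool's data room, then map it below to the largest RSCMAX
 * setting (1/4/8/16 descriptors) that does not exceed that count.
 */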
4075 RTE_IPV4_MAX_PKT_LEN /
4076 (mp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM);
4079 return TXGBE_RXCFG_RSCMAX_16;
4080 else if (maxdesc >= 8)
4081 return TXGBE_RXCFG_RSCMAX_8;
4082 else if (maxdesc >= 4)
4083 return TXGBE_RXCFG_RSCMAX_4;
4085 return TXGBE_RXCFG_RSCMAX_1;
4089 * txgbe_set_rsc - configure RSC related port HW registers
4091 * Configures the port's RSC related registers.
4095 * Returns 0 in case of success or a non-zero error code
4098 txgbe_set_rsc(struct rte_eth_dev *dev)
4100 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4101 struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
4102 struct rte_eth_dev_info dev_info = { 0 };
4103 bool rsc_capable = false;
4109 dev->dev_ops->dev_infos_get(dev, &dev_info);
4110 if (dev_info.rx_offload_capa & RTE_ETH_RX_OFFLOAD_TCP_LRO)
4113 if (!rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
4114 PMD_INIT_LOG(CRIT, "LRO is requested on HW that doesn't "
4119 /* RSC global configuration */
4121 if ((rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC) &&
4122 (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO)) {
4123 PMD_INIT_LOG(CRIT, "LRO can't be enabled when HW CRC "
4128 rfctl = rd32(hw, TXGBE_PSRCTL);
4129 if (rsc_capable && (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
4130 rfctl &= ~TXGBE_PSRCTL_RSCDIA;
4132 rfctl |= TXGBE_PSRCTL_RSCDIA;
4133 wr32(hw, TXGBE_PSRCTL, rfctl);
4135 /* If LRO hasn't been requested - we are done here. */
4136 if (!(rx_conf->offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO))
4139 /* Set PSRCTL.RSCACK bit */
4140 rdrxctl = rd32(hw, TXGBE_PSRCTL);
4141 rdrxctl |= TXGBE_PSRCTL_RSCACK;
4142 wr32(hw, TXGBE_PSRCTL, rdrxctl);
4144 /* Per-queue RSC configuration */
4145 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4146 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
4148 rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
4150 rd32(hw, TXGBE_POOLRSS(rxq->reg_idx));
4152 rd32(hw, TXGBE_ITR(rxq->reg_idx));
4155 * txgbe PMD doesn't support header-split at the moment.
4157 srrctl &= ~TXGBE_RXCFG_HDRLEN_MASK;
4158 srrctl |= TXGBE_RXCFG_HDRLEN(128);
4161 * TODO: Consider setting the Receive Descriptor Minimum
4162 * Threshold Size for an RSC case. This is not an obviously
4163 * beneficial option, but it may be worth considering...
4166 srrctl |= TXGBE_RXCFG_RSCENA;
4167 srrctl &= ~TXGBE_RXCFG_RSCMAX_MASK;
4168 srrctl |= txgbe_get_rscctl_maxdesc(rxq->mb_pool);
4169 psrtype |= TXGBE_POOLRSS_L4HDR;
4172 * RSC: Set ITR interval corresponding to 2K ints/s.
4174 * Full-sized RSC aggregations for a 10Gb/s link will
4175 * arrive at about 20K aggregation/s rate.
4177 * A 2K ints/s rate means only about 10% of the
4178 * aggregations will be closed by interrupt timer
4179 * expiration when streaming at wire speed.
4181 * For a sparse streaming case this setting will yield
4182 * at most 500us latency for a single RSC aggregation.
4184 eitr &= ~TXGBE_ITR_IVAL_MASK;
4185 eitr |= TXGBE_ITR_IVAL_10G(TXGBE_QUEUE_ITR_INTERVAL_DEFAULT);
4186 eitr |= TXGBE_ITR_WRDSA;
4188 wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
4189 wr32(hw, TXGBE_POOLRSS(rxq->reg_idx), psrtype);
4190 wr32(hw, TXGBE_ITR(rxq->reg_idx), eitr);
4193 * RSC requires the mapping of the queue to the
4196 txgbe_set_ivar_map(hw, 0, rxq->reg_idx, i);
4201 PMD_INIT_LOG(DEBUG, "enabling LRO mode");
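/*
 * LRO/RSC is only entered when the application requested it and the device
 * reports RTE_ETH_RX_OFFLOAD_TCP_LRO in its Rx offload capabilities, as
 * checked at the top of this function. A hypothetical configuration sketch:
 *
 *     struct rte_eth_conf port_conf = {
 *         .rxmode = { .offloads = RTE_ETH_RX_OFFLOAD_TCP_LRO },
 *     };
 *     rte_eth_dev_configure(0, 1, 1, &port_conf); // port 0, 1 Rx / 1 Tx queue
 */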
4207 txgbe_set_rx_function(struct rte_eth_dev *dev)
4210 struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
4213 * Initialize the appropriate LRO callback.
4215 * If all queues satisfy the bulk allocation preconditions
4216 * (adapter->rx_bulk_alloc_allowed is TRUE) then we may use
4217 * bulk allocation. Otherwise use a single allocation version.
4219 if (dev->data->lro) {
4220 if (adapter->rx_bulk_alloc_allowed) {
4221 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a bulk "
4222 "allocation version");
4223 dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
4225 PMD_INIT_LOG(DEBUG, "LRO is requested. Using a single "
4226 "allocation version");
4227 dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
4229 } else if (dev->data->scattered_rx) {
4231 * Set the non-LRO scattered callback: there are bulk and
4232 * single allocation versions.
4234 if (adapter->rx_bulk_alloc_allowed) {
4235 PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
4236 "allocation callback (port=%d).",
4237 dev->data->port_id);
4238 dev->rx_pkt_burst = txgbe_recv_pkts_lro_bulk_alloc;
4240 PMD_INIT_LOG(DEBUG, "Using Regular (non-vector, "
4241 "single allocation) "
4242 "Scattered Rx callback "
4244 dev->data->port_id);
4246 dev->rx_pkt_burst = txgbe_recv_pkts_lro_single_alloc;
4249 * Below we set "simple" callbacks according to port/queues parameters.
4250 * If parameters allow we are going to choose between the following
4251 * callbacks:
4252 * - Bulk Allocation
4253 * - Single buffer allocation (the simplest one)
4255 } else if (adapter->rx_bulk_alloc_allowed) {
4256 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
4257 "satisfied. Rx Burst Bulk Alloc function "
4258 "will be used on port=%d.",
4259 dev->data->port_id);
4261 dev->rx_pkt_burst = txgbe_recv_pkts_bulk_alloc;
4263 PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
4264 "satisfied, or Scattered Rx is requested "
4266 dev->data->port_id);
4268 dev->rx_pkt_burst = txgbe_recv_pkts;
4271 #ifdef RTE_LIB_SECURITY
4272 for (i = 0; i < dev->data->nb_rx_queues; i++) {
4273 struct txgbe_rx_queue *rxq = dev->data->rx_queues[i];
4275 rxq->using_ipsec = !!(dev->data->dev_conf.rxmode.offloads &
4276 RTE_ETH_RX_OFFLOAD_SECURITY);
4282 * Initializes Receive Unit.
4285 txgbe_dev_rx_init(struct rte_eth_dev *dev)
4287 struct txgbe_hw *hw;
4288 struct txgbe_rx_queue *rxq;
4297 struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
4300 PMD_INIT_FUNC_TRACE();
4301 hw = TXGBE_DEV_HW(dev);
4304 * Make sure receives are disabled while setting
4305 * up the RX context (registers, descriptor rings, etc.).
4307 wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_ENA, 0);
4308 wr32m(hw, TXGBE_PBRXCTL, TXGBE_PBRXCTL_ENA, 0);
4310 /* Enable receipt of broadcast frames */
4311 fctrl = rd32(hw, TXGBE_PSRCTL);
4312 fctrl |= TXGBE_PSRCTL_BCA;
4313 wr32(hw, TXGBE_PSRCTL, fctrl);
4316 * Configure CRC stripping, if any.
4318 hlreg0 = rd32(hw, TXGBE_SECRXCTL);
4319 if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
4320 hlreg0 &= ~TXGBE_SECRXCTL_CRCSTRIP;
4322 hlreg0 |= TXGBE_SECRXCTL_CRCSTRIP;
4323 wr32(hw, TXGBE_SECRXCTL, hlreg0);
4326 * Configure jumbo frame support, if any.
4328 wr32m(hw, TXGBE_FRMSZ, TXGBE_FRMSZ_MAX_MASK,
4329 TXGBE_FRMSZ_MAX(dev->data->mtu + TXGBE_ETH_OVERHEAD));
4332 * If loopback mode is configured, set LPBK bit.
4334 hlreg0 = rd32(hw, TXGBE_PSRCTL);
4335 if (hw->mac.type == txgbe_mac_raptor &&
4336 dev->data->dev_conf.lpbk_mode)
4337 hlreg0 |= TXGBE_PSRCTL_LBENA;
4339 hlreg0 &= ~TXGBE_PSRCTL_LBENA;
4341 wr32(hw, TXGBE_PSRCTL, hlreg0);
4344 * Assume no header split and no VLAN strip support
4345 * on any Rx queue first.
4347 rx_conf->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/*
		 * Reset crc_len in case it was changed after queue setup by a
		 * call to configure.
		 */
		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
			rxq->crc_len = RTE_ETHER_CRC_LEN;
		else
			rxq->crc_len = 0;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;
		wr32(hw, TXGBE_RXBAL(rxq->reg_idx),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_RXBAH(rxq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
		wr32(hw, TXGBE_RXWP(rxq->reg_idx), 0);

		srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= TXGBE_RXCFG_DROP;

		/*
		 * Configure the RX buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_UP(buf_size, 0x1 << 10);
		srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);
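		/*
		 * For example, a pool created with RTE_MBUF_DEFAULT_BUF_SIZE
		 * (2176 bytes) leaves 2048 bytes after the default 128-byte
		 * RTE_PKTMBUF_HEADROOM, so the PKTLEN field is programmed
		 * for 2 KB buffers.
		 */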

		wr32(hw, TXGBE_RXCFG(rxq->reg_idx), srrctl);
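
		/*
		 * If the largest possible frame (MTU plus Ethernet and double
		 * VLAN overhead) does not fit into a single Rx buffer, the
		 * port has to receive it as a chain of mbufs, so scattered Rx
		 * is forced below.
		 */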
		/* It adds dual VLAN length for supporting dual VLAN */
		if (dev->data->mtu + TXGBE_ETH_OVERHEAD +
				2 * RTE_VLAN_HLEN > buf_size)
			dev->data->scattered_rx = 1;
		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rx_conf->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
		dev->data->scattered_rx = 1;

	/*
	 * Device configured with multiple RX queues.
	 */
	txgbe_dev_mq_rx_configure(dev);

	/*
	 * Setup the Checksum Register.
	 * Disable Full-Packet Checksum which is mutually exclusive with RSS.
	 * Enable IP/L4 checksum computation by hardware if requested to do so.
	 */
	rxcsum = rd32(hw, TXGBE_PSRCTL);
	rxcsum |= TXGBE_PSRCTL_PCSD;
	if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM)
		rxcsum |= TXGBE_PSRCTL_L4CSUM;
	else
		rxcsum &= ~TXGBE_PSRCTL_L4CSUM;

	wr32(hw, TXGBE_PSRCTL, rxcsum);

	if (hw->mac.type == txgbe_mac_raptor) {
		rdrxctl = rd32(hw, TXGBE_SECRXCTL);
		if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
			rdrxctl &= ~TXGBE_SECRXCTL_CRCSTRIP;
		else
			rdrxctl |= TXGBE_SECRXCTL_CRCSTRIP;
		wr32(hw, TXGBE_SECRXCTL, rdrxctl);
	}

	rc = txgbe_set_rsc(dev);
	if (rc)
		return rc;

	txgbe_set_rx_function(dev);

	return 0;
}

/*
 * Initializes Transmit Unit.
 */
void __rte_cold
txgbe_dev_tx_init(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, TXGBE_TXBAL(txq->reg_idx),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_TXBAH(txq->reg_idx),
				(uint32_t)(bus_addr >> 32));
		wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_BUFLEN_MASK,
			TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		wr32(hw, TXGBE_TXRP(txq->reg_idx), 0);
		wr32(hw, TXGBE_TXWP(txq->reg_idx), 0);
	}

	/* Device configured with multiple TX queues. */
	txgbe_dev_mq_tx_configure(dev);
}

/*
 * Set up link loopback mode Tx->Rx.
 */
static inline void __rte_cold
txgbe_setup_loopback_link_raptor(struct txgbe_hw *hw)
{
	PMD_INIT_FUNC_TRACE();

	wr32m(hw, TXGBE_MACRXCFG, TXGBE_MACRXCFG_LB, TXGBE_MACRXCFG_LB);
}

/*
 * Start Transmit and Receive Units.
 */
int __rte_cold
txgbe_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_tx_queue *txq;
	struct txgbe_rx_queue *rxq;
	uint32_t dmatxctl;
	uint32_t rxctrl;
	uint16_t i;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
		      TXGBE_TXCFG_HTHRESH_MASK |
		      TXGBE_TXCFG_WTHRESH_MASK,
		      TXGBE_TXCFG_HTHRESH(txq->hthresh) |
		      TXGBE_TXCFG_WTHRESH(txq->wthresh));
	}

	dmatxctl = rd32(hw, TXGBE_DMATXCTRL);
	dmatxctl |= TXGBE_DMATXCTRL_ENA;
	wr32(hw, TXGBE_DMATXCTRL, dmatxctl);
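
	/*
	 * With transmit DMA now enabled globally, the individual Tx and Rx
	 * queues can be brought up (unless their start is deferred).
	 */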
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		if (!txq->tx_deferred_start) {
			ret = txgbe_dev_tx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		if (!rxq->rx_deferred_start) {
			ret = txgbe_dev_rx_queue_start(dev, i);
			if (ret < 0)
				return ret;
		}
	}

	/* Enable Receive engine */
	rxctrl = rd32(hw, TXGBE_PBRXCTL);
	rxctrl |= TXGBE_PBRXCTL_ENA;
	hw->mac.enable_rx_dma(hw, rxctrl);
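
	/*
	 * Note that the Rx packet-buffer engine is only enabled once every Rx
	 * queue has been programmed and started; the MAC-specific
	 * enable_rx_dma() callback performs the final enable.
	 */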

	/* If loopback mode is enabled, set up the link accordingly */
	if (hw->mac.type == txgbe_mac_raptor &&
	    dev->data->dev_conf.lpbk_mode)
		txgbe_setup_loopback_link_raptor(hw);

#ifdef RTE_LIB_SECURITY
	if ((dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_SECURITY) ||
	    (dev->data->dev_conf.txmode.offloads & RTE_ETH_TX_OFFLOAD_SECURITY)) {
		ret = txgbe_crypto_enable_ipsec(dev);
		if (ret != 0) {
			PMD_DRV_LOG(ERR,
				    "txgbe_crypto_enable_ipsec fails with %d.",
				    ret);
			return ret;
		}
	}
#endif

	return 0;
}

static void
txgbe_dev_save_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	*(reg++) = rd32(hw, TXGBE_RXBAL(rx_queue_id));
	*(reg++) = rd32(hw, TXGBE_RXBAH(rx_queue_id));
	*(reg++) = rd32(hw, TXGBE_RXCFG(rx_queue_id));
}

static void
txgbe_dev_store_rx_queue(struct txgbe_hw *hw, uint16_t rx_queue_id)
{
	u32 *reg = &hw->q_rx_regs[rx_queue_id * 8];
	wr32(hw, TXGBE_RXBAL(rx_queue_id), *(reg++));
	wr32(hw, TXGBE_RXBAH(rx_queue_id), *(reg++));
	wr32(hw, TXGBE_RXCFG(rx_queue_id), *(reg++) & ~TXGBE_RXCFG_ENA);
}

static void
txgbe_dev_save_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	*(reg++) = rd32(hw, TXGBE_TXBAL(tx_queue_id));
	*(reg++) = rd32(hw, TXGBE_TXBAH(tx_queue_id));
	*(reg++) = rd32(hw, TXGBE_TXCFG(tx_queue_id));
}

static void
txgbe_dev_store_tx_queue(struct txgbe_hw *hw, uint16_t tx_queue_id)
{
	u32 *reg = &hw->q_tx_regs[tx_queue_id * 8];
	wr32(hw, TXGBE_TXBAL(tx_queue_id), *(reg++));
	wr32(hw, TXGBE_TXBAH(tx_queue_id), *(reg++));
	wr32(hw, TXGBE_TXCFG(tx_queue_id), *(reg++) & ~TXGBE_TXCFG_ENA);
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
txgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	/* Allocate buffers for descriptor rings */
	if (txgbe_alloc_rx_queue_mbufs(rxq) != 0) {
		PMD_INIT_LOG(ERR, "Could not alloc mbuf for queue:%d",
			     rx_queue_id);
		return -1;
	}
	rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	rxdctl |= TXGBE_RXCFG_ENA;
	wr32(hw, TXGBE_RXCFG(rxq->reg_idx), rxdctl);

	/* Wait until RX Enable ready */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", rx_queue_id);
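
	/*
	 * Initialize the hardware head to the first descriptor and the tail
	 * to the last one, so the complete ring is available to hardware for
	 * receive.
	 */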
	wr32(hw, TXGBE_RXRP(rxq->reg_idx), 0);
	wr32(hw, TXGBE_RXWP(rxq->reg_idx), rxq->nb_rx_desc - 1);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
txgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_adapter *adapter = TXGBE_DEV_ADAPTER(dev);
	struct txgbe_rx_queue *rxq;
	uint32_t rxdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[rx_queue_id];

	txgbe_dev_save_rx_queue(hw, rxq->reg_idx);
	wr32m(hw, TXGBE_RXCFG(rxq->reg_idx), TXGBE_RXCFG_ENA, 0);

	/* Wait until RX Enable bit clear */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		rxdctl = rd32(hw, TXGBE_RXCFG(rxq->reg_idx));
	} while (--poll_ms && (rxdctl & TXGBE_RXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id);

	rte_delay_us(RTE_TXGBE_WAIT_100_US);
	txgbe_dev_store_rx_queue(hw, rxq->reg_idx);

	txgbe_rx_queue_release_mbufs(rxq);
	txgbe_reset_rx_queue(adapter, rxq);
	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
txgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_tx_queue *txq;
	uint32_t txdctl;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];
	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);

	/* Wait until TX Enable ready */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not enable "
			     "Tx Queue %d", tx_queue_id);
	wr32(hw, TXGBE_TXWP(txq->reg_idx), txq->tx_tail);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Transmit Units for specified queue.
 */
int __rte_cold
txgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
	struct txgbe_tx_queue *txq;
	uint32_t txdctl;
	uint32_t txtdh, txtdt;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	/* Wait until TX queue is empty */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_us(RTE_TXGBE_WAIT_100_US);
		txtdh = rd32(hw, TXGBE_TXRP(txq->reg_idx));
		txtdt = rd32(hw, TXGBE_TXWP(txq->reg_idx));
	} while (--poll_ms && (txtdh != txtdt));
	if (!poll_ms)
		PMD_INIT_LOG(ERR,
			"Tx Queue %d is not empty when stopping.",
			tx_queue_id);

	txgbe_dev_save_tx_queue(hw, txq->reg_idx);
	wr32m(hw, TXGBE_TXCFG(txq->reg_idx), TXGBE_TXCFG_ENA, 0);

	/* Wait until TX Enable bit clear */
	poll_ms = RTE_TXGBE_REGISTER_POLL_WAIT_10_MS;
	do {
		rte_delay_ms(1);
		txdctl = rd32(hw, TXGBE_TXCFG(txq->reg_idx));
	} while (--poll_ms && (txdctl & TXGBE_TXCFG_ENA));
	if (!poll_ms)
		PMD_INIT_LOG(ERR, "Could not disable Tx Queue %d",
			tx_queue_id);

	rte_delay_us(RTE_TXGBE_WAIT_100_US);
	txgbe_dev_store_tx_queue(hw, txq->reg_idx);

	if (txq->ops != NULL) {
		txq->ops->release_mbufs(txq);
		txq->ops->reset(txq);
	}
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

void
txgbe_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_rxq_info *qinfo)
{
	struct txgbe_rx_queue *rxq;

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;

	qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
	qinfo->conf.rx_drop_en = rxq->drop_en;
	qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
	qinfo->conf.offloads = rxq->offloads;
}

void
txgbe_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
	struct rte_eth_txq_info *qinfo)
{
	struct txgbe_tx_queue *txq;

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;

	qinfo->conf.tx_thresh.pthresh = txq->pthresh;
	qinfo->conf.tx_thresh.hthresh = txq->hthresh;
	qinfo->conf.tx_thresh.wthresh = txq->wthresh;

	qinfo->conf.tx_free_thresh = txq->tx_free_thresh;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
}

/*
 * [VF] Initializes Receive Unit.
 */
int __rte_cold
txgbevf_dev_rx_init(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_rx_queue *rxq;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	uint64_t bus_addr;
	uint32_t srrctl, psrtype;
	uint16_t buf_size;
	uint16_t i;
	int ret;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	if (rte_is_power_of_2(dev->data->nb_rx_queues) == 0) {
		PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
			"it should be power of 2");
		return -1;
	}

	if (dev->data->nb_rx_queues > hw->mac.max_rx_queues) {
		PMD_INIT_LOG(ERR, "The number of Rx queue invalid, "
			"it should be equal to or less than %d",
			hw->mac.max_rx_queues);
		return -1;
	}

	/*
	 * When the VF driver issues a TXGBE_VF_RESET request, the PF driver
	 * disables VF receipt of packets if the PF MTU is > 1500.
	 * This is done to deal with a limitation that requires the PF and
	 * all VFs to share the same MTU.
	 * The PF driver re-enables VF packet receipt only when the VF driver
	 * issues a TXGBE_VF_SET_LPE request.
	 * In the meantime, the VF device cannot be used, even if the VF driver
	 * and the Guest VM network stack are ready to accept packets with a
	 * size up to the PF MTU.
	 * As a work-around to this PF behaviour, force the call to
	 * txgbevf_rlpml_set_vf even if jumbo frames are not used. This way,
	 * packet receipt on the VF works in all cases.
	 */
	if (txgbevf_rlpml_set_vf(hw,
	    (uint16_t)dev->data->mtu + TXGBE_ETH_OVERHEAD)) {
		PMD_INIT_LOG(ERR, "Set max packet length to %d failed.",
			     dev->data->mtu + TXGBE_ETH_OVERHEAD);
		return -EINVAL;
	}

	/*
	 * Assume no header split and no VLAN strip support
	 * on any Rx queue first.
	 */
	rxmode->offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;

	/* Set PSR type for VF RSS according to max Rx queue */
	psrtype = TXGBE_VFPLCFG_PSRL4HDR |
		  TXGBE_VFPLCFG_PSRL4HDR |
		  TXGBE_VFPLCFG_PSRL2HDR |
		  TXGBE_VFPLCFG_PSRTUNHDR |
		  TXGBE_VFPLCFG_PSRTUNMAC;
	wr32(hw, TXGBE_VFPLCFG, TXGBE_VFPLCFG_PSR(psrtype));

	/* Setup RX queues */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		/* Allocate buffers for descriptor rings */
		ret = txgbe_alloc_rx_queue_mbufs(rxq);
		if (ret)
			return ret;

		/* Setup the Base and Length of the Rx Descriptor Rings */
		bus_addr = rxq->rx_ring_phys_addr;

		wr32(hw, TXGBE_RXBAL(i),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_RXBAH(i),
				(uint32_t)(bus_addr >> 32));
		wr32(hw, TXGBE_RXRP(i), 0);
		wr32(hw, TXGBE_RXWP(i), 0);

		/* Configure the RXCFG register */
		srrctl = TXGBE_RXCFG_RNGLEN(rxq->nb_rx_desc);

		/* Set if packets are dropped when no descriptors available */
		if (rxq->drop_en)
			srrctl |= TXGBE_RXCFG_DROP;

		/*
		 * Configure the RX buffer size in the PKTLEN field of
		 * the RXCFG register of the queue.
		 * The value is in 1 KB resolution. Valid values can be from
		 * 1 KB to 16 KB.
		 */
		buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
			RTE_PKTMBUF_HEADROOM);
		buf_size = ROUND_UP(buf_size, 1 << 10);
		srrctl |= TXGBE_RXCFG_PKTLEN(buf_size);

		/*
		 * VF modification to write virtual function RXCFG register
		 */
		wr32(hw, TXGBE_RXCFG(i), srrctl);

		if (rxmode->offloads & RTE_ETH_RX_OFFLOAD_SCATTER ||
		    /* It adds dual VLAN length for supporting dual VLAN */
		    (dev->data->mtu + TXGBE_ETH_OVERHEAD +
				2 * RTE_VLAN_HLEN) > buf_size) {
			if (!dev->data->scattered_rx)
				PMD_INIT_LOG(DEBUG, "forcing scatter mode");
			dev->data->scattered_rx = 1;
		}

		if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
			rxmode->offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	}

	/*
	 * Device configured with multiple RX queues.
	 */
	txgbe_dev_mq_rx_configure(dev);

	txgbe_set_rx_function(dev);

	return 0;
}

/*
 * [VF] Initializes Transmit Unit.
 */
void __rte_cold
txgbevf_dev_tx_init(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_tx_queue *txq;
	uint64_t bus_addr;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	/* Setup the Base and Length of the Tx Descriptor Rings */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		bus_addr = txq->tx_ring_phys_addr;
		wr32(hw, TXGBE_TXBAL(i),
				(uint32_t)(bus_addr & BIT_MASK32));
		wr32(hw, TXGBE_TXBAH(i),
				(uint32_t)(bus_addr >> 32));
		wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_BUFLEN_MASK,
			TXGBE_TXCFG_BUFLEN(txq->nb_tx_desc));
		/* Setup the HW Tx Head and TX Tail descriptor pointers */
		wr32(hw, TXGBE_TXRP(i), 0);
		wr32(hw, TXGBE_TXWP(i), 0);
	}
}

/*
 * [VF] Start Transmit and Receive Units.
 */
void __rte_cold
txgbevf_dev_rxtx_start(struct rte_eth_dev *dev)
{
	struct txgbe_hw *hw;
	struct txgbe_tx_queue *txq;
	struct txgbe_rx_queue *rxq;
	uint32_t txdctl;
	uint32_t rxdctl;
	uint16_t i;
	int poll_ms;

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Setup Transmit Threshold Registers */
		wr32m(hw, TXGBE_TXCFG(txq->reg_idx),
		      TXGBE_TXCFG_HTHRESH_MASK |
		      TXGBE_TXCFG_WTHRESH_MASK,
		      TXGBE_TXCFG_HTHRESH(txq->hthresh) |
		      TXGBE_TXCFG_WTHRESH(txq->wthresh));
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		wr32m(hw, TXGBE_TXCFG(i), TXGBE_TXCFG_ENA, TXGBE_TXCFG_ENA);

		/* Wait until TX Enable ready */
		poll_ms = 10;
		do {
			rte_delay_ms(1);
			txdctl = rd32(hw, TXGBE_TXCFG(i));
		} while (--poll_ms && !(txdctl & TXGBE_TXCFG_ENA));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not enable Tx Queue %d", i);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		wr32m(hw, TXGBE_RXCFG(i), TXGBE_RXCFG_ENA, TXGBE_RXCFG_ENA);

		/* Wait until RX Enable ready */
		poll_ms = 10;
		do {
			rte_delay_ms(1);
			rxdctl = rd32(hw, TXGBE_RXCFG(i));
		} while (--poll_ms && !(rxdctl & TXGBE_RXCFG_ENA));
		if (!poll_ms)
			PMD_INIT_LOG(ERR, "Could not enable Rx Queue %d", i);

		wr32(hw, TXGBE_RXWP(i), rxq->nb_rx_desc - 1);
	}
}
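
/*
 * txgbe_rss_conf_init() deep-copies the rte_flow RSS key and queue list
 * into driver-owned storage, so the saved configuration does not depend
 * on the lifetime of the caller's rte_flow_action_rss buffers.
 */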
int
txgbe_rss_conf_init(struct txgbe_rte_flow_rss_conf *out,
		    const struct rte_flow_action_rss *in)
{
	if (in->key_len > RTE_DIM(out->key) ||
	    in->queue_num > RTE_DIM(out->queue))
		return -EINVAL;
	out->conf = (struct rte_flow_action_rss){
		.func = in->func,
		.level = in->level,
		.types = in->types,
		.key_len = in->key_len,
		.queue_num = in->queue_num,
		.key = memcpy(out->key, in->key, in->key_len),
		.queue = memcpy(out->queue, in->queue,
				sizeof(*in->queue) * in->queue_num),
	};
	return 0;
}

static bool
txgbe_action_rss_same(const struct rte_flow_action_rss *comp,
		      const struct rte_flow_action_rss *with)
{
	return (comp->func == with->func &&
		comp->level == with->level &&
		comp->types == with->types &&
		comp->key_len == with->key_len &&
		comp->queue_num == with->queue_num &&
		!memcmp(comp->key, with->key, with->key_len) &&
		!memcmp(comp->queue, with->queue,
			sizeof(*with->queue) * with->queue_num));
}

int
txgbe_config_rss_filter(struct rte_eth_dev *dev,
		struct txgbe_rte_flow_rss_conf *conf, bool add)
{
	struct txgbe_hw *hw;
	uint32_t reta;
	uint16_t i;
	uint16_t j;
	struct rte_eth_rss_conf rss_conf = {
		.rss_key = conf->conf.key_len ?
			(void *)(uintptr_t)conf->conf.key : NULL,
		.rss_key_len = conf->conf.key_len,
		.rss_hf = conf->conf.types,
	};
	struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);

	PMD_INIT_FUNC_TRACE();
	hw = TXGBE_DEV_HW(dev);

	if (!add) {
		if (txgbe_action_rss_same(&filter_info->rss_info.conf,
					  &conf->conf)) {
			txgbe_rss_disable(dev);
			memset(&filter_info->rss_info, 0,
				sizeof(struct txgbe_rte_flow_rss_conf));
			return 0;
		}
		return -EINVAL;
	}

	if (filter_info->rss_info.conf.queue_num)
		return -EINVAL;
	/* Fill in redirection table
	 * The byte-swap is needed because NIC registers are in
	 * little-endian order.
	 */
	reta = 0;
	for (i = 0, j = 0; i < RTE_ETH_RSS_RETA_SIZE_128; i++, j++) {
		if (j == conf->conf.queue_num)
			j = 0;
		reta = (reta >> 8) | LS32(conf->conf.queue[j], 24, 0xFF);
		if ((i & 3) == 3)
			wr32at(hw, TXGBE_REG_RSSTBL, i >> 2, reta);
	}
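
	/*
	 * Each 32-bit RSSTBL register holds four consecutive 8-bit RETA
	 * entries, the first of the four in the least significant byte.
	 * For example, with queues {0, 1, 2, 3} every register is written
	 * as 0x03020100, and the queue list repeats cyclically across the
	 * 128-entry table.
	 */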

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	if ((rss_conf.rss_hf & TXGBE_RSS_OFFLOAD_ALL) == 0) {
		txgbe_rss_disable(dev);
		return 0;
	}
	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = rss_intel_key; /* Default hash key */
	txgbe_dev_rss_hash_update(dev, &rss_conf);

	if (txgbe_rss_conf_init(&filter_info->rss_info, &conf->conf))