/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#ifndef _TXGBE_RXTX_H_
#define _TXGBE_RXTX_H_

/*****************************************************************************
 * Receive Descriptor
 *****************************************************************************/
struct txgbe_rx_desc {
	struct {
		__le32 dw0;
		__le32 dw1;
	} qw0; /* also as r.pkt_addr */
	struct {
		__le32 dw2;
		__le32 dw3;
	} qw1; /* also as r.hdr_addr */
};

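/* A minimal refill sketch, not part of the original header: in read format
 * the two quadwords alias r.pkt_addr and r.hdr_addr, so an RX refill path
 * can program a descriptor roughly as below. The helper name is
 * hypothetical; rte_mbuf_data_iova_default() and the byte-order macros are
 * standard DPDK.
 */
static inline void
txgbe_rxd_refill_sketch(volatile struct txgbe_rx_desc *rxd, struct rte_mbuf *mb)
{
	uint64_t dma = rte_mbuf_data_iova_default(mb);

	rxd->qw0.dw0 = rte_cpu_to_le_32((uint32_t)dma);         /* r.pkt_addr lo */
	rxd->qw0.dw1 = rte_cpu_to_le_32((uint32_t)(dma >> 32)); /* r.pkt_addr hi */
	rxd->qw1.dw2 = 0; /* r.hdr_addr unused without header split */
	rxd->qw1.dw3 = 0;
}
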
/*****************************************************************************
 * Transmit Data Descriptor (TXGBE_TXD_TYP=DATA)
 *****************************************************************************/
struct txgbe_tx_desc {
	__le64 qw0; /* r.buffer_addr, w.reserved */
	__le32 dw2; /* r.cmd_type_len, w.nxtseq_seed */
	__le32 dw3; /* r.olinfo_status, w.status */
};

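/* A sketch, assumed rather than taken from the driver, of filling a data
 * descriptor in read format. Only the qw0/dw2/dw3 layout comes from the
 * struct above; the helper name is hypothetical and the command/offload
 * words would be built from TXGBE_TXD_* flags defined elsewhere.
 */
static inline void
txgbe_txd_fill_sketch(volatile struct txgbe_tx_desc *txd, struct rte_mbuf *mb,
		      uint32_t cmd_type_len, uint32_t olinfo_status)
{
	txd->qw0 = rte_cpu_to_le_64(rte_mbuf_data_iova(mb)); /* r.buffer_addr */
	txd->dw2 = rte_cpu_to_le_32(cmd_type_len | mb->data_len); /* r.cmd_type_len */
	txd->dw3 = rte_cpu_to_le_32(olinfo_status); /* r.olinfo_status */
}
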
#define RTE_PMD_TXGBE_TX_MAX_BURST 32
#define RTE_PMD_TXGBE_RX_MAX_BURST 32

#define RX_RING_SZ ((TXGBE_RING_DESC_MAX + RTE_PMD_TXGBE_RX_MAX_BURST) * \
		    sizeof(struct txgbe_rx_desc))

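/* Note (inferred, not stated in the original header): the extra
 * RTE_PMD_TXGBE_RX_MAX_BURST descriptors in RX_RING_SZ let the burst RX
 * path scan up to one full burst past the ring end without special-casing
 * the wrap-around mid-burst.
 */
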
#define TXGBE_PTID_MASK 0xFF

#define TXGBE_TX_MAX_SEG 40

/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct txgbe_rx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with RX descriptor. */
};

struct txgbe_scattered_rx_entry {
	struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct txgbe_tx_entry {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
	uint16_t next_id; /**< Index of next descriptor in ring. */
	uint16_t last_id; /**< Index of last scattered descriptor. */
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue,
 * used by the vector PMD.
 */
struct txgbe_tx_entry_v {
	struct rte_mbuf *mbuf; /**< mbuf associated with TX desc, if any. */
};

/**
 * Structure associated with each RX queue.
 */
struct txgbe_rx_queue {
	struct rte_mempool *mb_pool; /**< mbuf pool to populate RX ring. */
	volatile struct txgbe_rx_desc *rx_ring; /**< RX ring virtual address. */
	uint64_t rx_ring_phys_addr; /**< RX ring DMA address. */
	volatile uint32_t *rdt_reg_addr; /**< RDT register address. */
	volatile uint32_t *rdh_reg_addr; /**< RDH register address. */
	struct txgbe_rx_entry *sw_ring; /**< address of RX software ring. */
	/** Address of scattered Rx software ring. */
	struct txgbe_scattered_rx_entry *sw_sc_ring;
	struct rte_mbuf *pkt_first_seg; /**< First segment of current packet. */
	struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet. */
	uint16_t nb_rx_desc; /**< number of RX descriptors. */
	uint16_t rx_tail; /**< current value of RDT register. */
	uint16_t nb_rx_hold; /**< number of held free RX descriptors. */
	uint16_t rx_nb_avail; /**< number of staged packets ready for the app. */
	uint16_t rx_next_avail; /**< index of next staged packet for the app. */
	uint16_t rx_free_trigger; /**< triggers RX buffer allocation. */
	uint16_t rx_free_thresh; /**< max number of free RX descriptors to hold. */
	uint16_t queue_id; /**< RX queue index. */
	uint16_t reg_idx; /**< RX queue register index. */
	/** Packet type mask for different NICs. */
	uint16_t pkt_type_mask;
	uint16_t port_id; /**< Device port identifier. */
	uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */
	uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */
	uint8_t rx_deferred_start; /**< Queue is not started in dev start. */
	uint64_t offloads; /**< Rx offloads with DEV_RX_OFFLOAD_* flags. */
	/** Dummy mbuf needed for wraparound when scanning the HW ring. */
	struct rte_mbuf fake_mbuf;
	/** Hold packets to return to the application. */
	struct rte_mbuf *rx_stage[RTE_PMD_TXGBE_RX_MAX_BURST * 2];
};

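/* A sketch of the two-stage burst-RX pattern implied by rx_stage[],
 * rx_nb_avail, and rx_next_avail (the helper name is hypothetical; the real
 * driver implements this in txgbe_rxtx.c): completed packets are staged
 * first, then handed to the application until the stage drains.
 */
static inline uint16_t
txgbe_rx_fill_from_stage_sketch(struct txgbe_rx_queue *rxq,
				struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct rte_mbuf **stage = &rxq->rx_stage[rxq->rx_next_avail];
	uint16_t i;

	nb_pkts = (uint16_t)RTE_MIN(nb_pkts, rxq->rx_nb_avail);
	for (i = 0; i < nb_pkts; i++)
		rx_pkts[i] = stage[i];

	rxq->rx_nb_avail = (uint16_t)(rxq->rx_nb_avail - nb_pkts);
	rxq->rx_next_avail = (uint16_t)(rxq->rx_next_avail + nb_pkts);
	return nb_pkts;
}
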
/**
 * Structure associated with each TX queue.
 */
struct txgbe_tx_queue {
	/** TX ring virtual address. */
	volatile struct txgbe_tx_desc *tx_ring;
	uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
	/** Address of SW ring for the scalar PMD. */
	struct txgbe_tx_entry *sw_ring;
	/** Address of SW ring for the vector PMD. */
	struct txgbe_tx_entry_v *sw_ring_v;
	volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
	volatile uint32_t *tdc_reg_addr; /**< Address of TDC register. */
	uint16_t nb_tx_desc; /**< number of TX descriptors. */
	/** Start freeing TX buffers if there are fewer free descriptors
	 *  than this number.
	 */
	uint16_t tx_free_thresh;
	uint16_t queue_id; /**< TX queue index. */
	uint16_t reg_idx; /**< TX queue register index. */
	uint16_t port_id; /**< Device port identifier. */
	uint8_t pthresh; /**< Prefetch threshold register. */
	uint8_t hthresh; /**< Host threshold register. */
	uint8_t wthresh; /**< Write-back threshold register. */
	uint64_t offloads; /**< Tx offload flags of DEV_TX_OFFLOAD_* */
	const struct txgbe_txq_ops *ops; /**< TX queue ops. */
	uint8_t tx_deferred_start; /**< Queue is not started in dev start. */
};

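/* A sketch of how tx_free_thresh typically gates cleanup on the hot path
 * (the helper and the nb_tx_free parameter are hypothetical; the real driver
 * tracks the free count in fields elided from this excerpt): completed
 * descriptors are only reclaimed once free descriptors run low.
 */
static inline int
txgbe_tx_need_cleanup_sketch(const struct txgbe_tx_queue *txq,
			     uint16_t nb_tx_free)
{
	return nb_tx_free < txq->tx_free_thresh;
}
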
struct txgbe_txq_ops {
	void (*release_mbufs)(struct txgbe_tx_queue *txq);
	void (*free_swring)(struct txgbe_tx_queue *txq);
	void (*reset)(struct txgbe_tx_queue *txq);
};

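/* A sketch of wiring up an ops table; these stub callbacks are hypothetical
 * placeholders (the real driver installs its scalar/vector implementations
 * in txgbe_rxtx.c), shown only to illustrate how the struct is consumed.
 */
static void
txgbe_txq_release_mbufs_stub(struct txgbe_tx_queue *txq) { RTE_SET_USED(txq); }
static void
txgbe_txq_free_swring_stub(struct txgbe_tx_queue *txq) { RTE_SET_USED(txq); }
static void
txgbe_txq_reset_stub(struct txgbe_tx_queue *txq) { RTE_SET_USED(txq); }

static const struct txgbe_txq_ops txq_ops_sketch = {
	.release_mbufs = txgbe_txq_release_mbufs_stub,
	.free_swring = txgbe_txq_free_swring_stub,
	.reset = txgbe_txq_reset_stub,
};
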
/* Takes an ethdev and a queue and sets up the TX function to be used based on
 * the queue parameters. Used in tx_queue_setup by the primary process and then
 * in dev_init by a secondary process when attaching to an existing ethdev.
 */
void txgbe_set_tx_function(struct rte_eth_dev *dev, struct txgbe_tx_queue *txq);

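/* A sketch of the selection such a function typically performs; the burst
 * function prototypes below are assumed from the driver's .c file, and the
 * simple-path criteria mirror the common DPDK heuristic (no offloads and a
 * burst-friendly tx_free_thresh), not a quote of the real logic.
 */
uint16_t txgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts); /* assumed prototype */
uint16_t txgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
				uint16_t nb_pkts); /* assumed prototype */

static inline void
txgbe_set_tx_function_sketch(struct rte_eth_dev *dev,
			     struct txgbe_tx_queue *txq)
{
	if (txq->offloads == 0 &&
	    txq->tx_free_thresh >= RTE_PMD_TXGBE_TX_MAX_BURST)
		dev->tx_pkt_burst = txgbe_xmit_pkts_simple;
	else
		dev->tx_pkt_burst = txgbe_xmit_pkts;
}
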
void txgbe_set_rx_function(struct rte_eth_dev *dev);

uint64_t txgbe_get_tx_port_offloads(struct rte_eth_dev *dev);
uint64_t txgbe_get_rx_queue_offloads(struct rte_eth_dev *dev);
uint64_t txgbe_get_rx_port_offloads(struct rte_eth_dev *dev);
uint64_t txgbe_get_tx_queue_offloads(struct rte_eth_dev *dev);

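/* A sketch of how the offload helpers are typically consumed from an ethdev
 * dev_infos_get callback (assumed usage; the real callback lives in
 * txgbe_ethdev.c): port-level capabilities are reported as a superset of the
 * per-queue capabilities.
 */
static inline void
txgbe_fill_offload_caps_sketch(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info)
{
	dev_info->rx_queue_offload_capa = txgbe_get_rx_queue_offloads(dev);
	dev_info->rx_offload_capa = txgbe_get_rx_port_offloads(dev) |
				    dev_info->rx_queue_offload_capa;
	dev_info->tx_queue_offload_capa = txgbe_get_tx_queue_offloads(dev);
	dev_info->tx_offload_capa = txgbe_get_tx_port_offloads(dev) |
				    dev_info->tx_queue_offload_capa;
}
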
#endif /* _TXGBE_RXTX_H_ */