1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 /*****************************************************************************
 * Receive Descriptor
 *****************************************************************************/
28 } qw0; /* also as r.pkt_addr */
43 } qw1; /* also as r.hdr_addr */
46 /* @ngbe_rx_desc.qw0 */
/*
 * Write the packet buffer DMA address @v into qw0 (the first 64-bit word)
 * of Rx descriptor @rxd. The cast to volatile __le64 * forces a real store
 * to the descriptor ring — presumably shared with the NIC, so the write
 * must not be elided or reordered by the compiler — and cpu_to_le64()
 * converts the address to the little-endian layout the device expects.
 */
47 #define NGBE_RXD_PKTADDR(rxd, v) \
48 (((volatile __le64 *)(rxd))[0] = cpu_to_le64(v))
50 /* @ngbe_rx_desc.qw1 */
/*
 * Write the header buffer DMA address @v into qw1 (the second 64-bit word)
 * of Rx descriptor @rxd. Same volatile little-endian store discipline as
 * NGBE_RXD_PKTADDR, targeting index [1] instead of [0].
 */
51 #define NGBE_RXD_HDRADDR(rxd, v) \
52 (((volatile __le64 *)(rxd))[1] = cpu_to_le64(v))
54 /*****************************************************************************
 * Transmit Descriptor
 *****************************************************************************/
58 * Transmit Context Descriptor (NGBE_TXD_TYP=CTXT)
60 struct ngbe_tx_ctx_desc {
61 rte_le32_t dw0; /* w.vlan_macip_lens */
62 rte_le32_t dw1; /* w.seqnum_seed */
63 rte_le32_t dw2; /* w.type_tucmd_mlhl */
64 rte_le32_t dw3; /* w.mss_l4len_idx */
67 /* @ngbe_tx_ctx_desc.dw3 */
/*
 * Descriptor Done: bit 0 of dw3, built with the project's MS() mask macro
 * (presumably mask-shift; value 0x1 at shift 0 — confirm against MS()
 * definition). NOTE(review): by convention this bit is set by hardware
 * once it has consumed the descriptor — verify against the datasheet.
 */
68 #define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */
71 * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
74 rte_le64_t qw0; /* r.buffer_addr , w.reserved */
75 rte_le32_t dw2; /* r.cmd_type_len, w.nxtseq_seed */
76 rte_le32_t dw3; /* r.olinfo_status, w.status */
/* Max number of packets a bulk Rx burst may return in one call. */
79 #define RTE_PMD_NGBE_RX_MAX_BURST 32

/*
 * Byte size to allocate for one Rx descriptor ring. Includes
 * RTE_PMD_NGBE_RX_MAX_BURST extra descriptors beyond NGBE_RING_DESC_MAX —
 * presumably slack so the burst scan may read past the ring end without an
 * explicit wrap check (TODO confirm against the Rx burst implementation).
 */
81 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
82 sizeof(struct ngbe_rx_desc))
/* Timeout (in ms) when polling a device register for a state change. */
84 #define RTE_NGBE_REGISTER_POLL_WAIT_10_MS 10
/* Per-iteration delay (in us) used between register polls. */
85 #define RTE_NGBE_WAIT_100_US 100

/* Maximum number of scatter-gather segments accepted per Tx packet. */
87 #define NGBE_TX_MAX_SEG 40
/*
 * Default tx_free_thresh (number of used Tx descriptors accumulated before
 * freeing their mbufs) applied when the application does not supply one.
 * The #ifndef guard lets a build configuration override the value.
 */
89 #ifndef DEFAULT_TX_FREE_THRESH
90 #define DEFAULT_TX_FREE_THRESH 32
94 * Structure associated with each descriptor of the Rx ring of a Rx queue.
96 struct ngbe_rx_entry {
97 struct rte_mbuf *mbuf; /**< mbuf associated with Rx descriptor. */
100 struct ngbe_scattered_rx_entry {
101 struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
105 * Structure associated with each descriptor of the Tx ring of a Tx queue.
107 struct ngbe_tx_entry {
108 struct rte_mbuf *mbuf; /**< mbuf associated with Tx desc, if any. */
109 uint16_t next_id; /**< Index of next descriptor in ring. */
110 uint16_t last_id; /**< Index of last scattered descriptor. */
114 * Structure associated with each Rx queue.
116 struct ngbe_rx_queue {
117 struct rte_mempool *mb_pool; /**< mbuf pool to populate Rx ring */
118 uint64_t rx_ring_phys_addr; /**< Rx ring DMA address */
119 volatile uint32_t *rdt_reg_addr; /**< RDT register address */
120 volatile uint32_t *rdh_reg_addr; /**< RDH register address */
122 volatile struct ngbe_rx_desc *rx_ring; /**< Rx ring virtual address */
123 /** address of Rx software ring */
124 struct ngbe_rx_entry *sw_ring;
125 /** address of scattered Rx software ring */
126 struct ngbe_scattered_rx_entry *sw_sc_ring;
128 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet */
129 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet */
130 uint16_t nb_rx_desc; /**< number of Rx descriptors */
131 uint16_t rx_tail; /**< current value of RDT register */
132 uint16_t nb_rx_hold; /**< number of held free Rx desc */
134 uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
135 uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
136 uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
138 uint16_t rx_free_thresh; /**< max free Rx desc to hold */
139 uint16_t queue_id; /**< RX queue index */
140 uint16_t reg_idx; /**< RX queue register index */
141 uint16_t port_id; /**< Device port identifier */
142 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En */
143 uint8_t rx_deferred_start; /**< not in global dev start */
144 /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
145 struct rte_mbuf fake_mbuf;
146 /** hold packets to return to application */
147 struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
154 NGBE_CTX_0 = 0, /**< CTX0 */
155 NGBE_CTX_1 = 1, /**< CTX1 */
156 NGBE_CTX_NUM = 2, /**< CTX NUMBER */
160 * Structure to check if new context need be built
162 struct ngbe_ctx_info {
163 uint64_t flags; /**< ol_flags for context build. */
167 * Structure associated with each Tx queue.
169 struct ngbe_tx_queue {
170 /** Tx ring virtual address */
171 volatile struct ngbe_tx_desc *tx_ring;
173 uint64_t tx_ring_phys_addr; /**< Tx ring DMA address */
174 struct ngbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD */
175 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register */
176 volatile uint32_t *tdc_reg_addr; /**< Address of TDC register */
177 uint16_t nb_tx_desc; /**< number of Tx descriptors */
178 uint16_t tx_tail; /**< current value of TDT reg */
180 * Start freeing Tx buffers if there are less free descriptors than
183 uint16_t tx_free_thresh;
184 /** Index to last Tx descriptor to have been cleaned */
185 uint16_t last_desc_cleaned;
186 /** Total number of Tx descriptors ready to be allocated */
188 uint16_t tx_next_dd; /**< next desc to scan for DD bit */
189 uint16_t queue_id; /**< Tx queue index */
190 uint16_t reg_idx; /**< Tx queue register index */
191 uint16_t port_id; /**< Device port identifier */
192 uint8_t pthresh; /**< Prefetch threshold register */
193 uint8_t hthresh; /**< Host threshold register */
194 uint8_t wthresh; /**< Write-back threshold reg */
195 uint32_t ctx_curr; /**< Hardware context states */
196 /** Hardware context0 history */
197 struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM];
198 uint8_t tx_deferred_start; /**< not in global dev start */
200 const struct ngbe_txq_ops *ops; /**< txq ops */
203 struct ngbe_txq_ops {
204 void (*release_mbufs)(struct ngbe_tx_queue *txq);
205 void (*free_swring)(struct ngbe_tx_queue *txq);
206 void (*reset)(struct ngbe_tx_queue *txq);
209 #endif /* _NGBE_RXTX_H_ */