1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 /*****************************************************************************
11 *****************************************************************************/
28 } qw0; /* also as r.pkt_addr */
43 } qw1; /* also as r.hdr_addr */
46 /*****************************************************************************
48 *****************************************************************************/
50 * Transmit Context Descriptor (NGBE_TXD_TYP=CTXT)
52 struct ngbe_tx_ctx_desc {
/* Four little-endian 32-bit words (rte_le32_t); the "w." names in the
 * per-field comments give each word's meaning when software writes the
 * context descriptor for the hardware.
 */
53 rte_le32_t dw0; /* w.vlan_macip_lens */
54 rte_le32_t dw1; /* w.seqnum_seed */
55 rte_le32_t dw2; /* w.type_tucmd_mlhl */
56 rte_le32_t dw3; /* w.mss_l4len_idx */
59 /* @ngbe_tx_ctx_desc.dw3 */
/* NOTE(review): MS() is defined elsewhere; it presumably builds a mask at
 * the given bit position (bit 0, width-1 mask here) -- confirm against the
 * driver's register helper macros.
 */
60 #define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */
63 * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
/* NOTE(review): the "r."/"w." prefixes in the field comments appear to
 * distinguish the read view (software-written, consumed by hardware) from
 * the write-back view (hardware-written status) of each field -- confirm.
 */
66 rte_le64_t qw0; /* r.buffer_addr , w.reserved */
67 rte_le32_t dw2; /* r.cmd_type_len, w.nxtseq_seed */
68 rte_le32_t dw3; /* r.olinfo_status, w.status */
/* Maximum number of packets handled per burst by the Rx staging path
 * (also sizes ngbe_rx_queue::rx_stage[]).
 */
71 #define RTE_PMD_NGBE_RX_MAX_BURST 32
/* Byte size of an Rx descriptor ring, padded with RX_MAX_BURST extra
 * descriptor slots.
 */
73 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
74 sizeof(struct ngbe_rx_desc))
/* Register-poll timing constants; each value is a count of the unit
 * encoded in its name (10 ms, 100 us).
 */
76 #define RTE_NGBE_REGISTER_POLL_WAIT_10_MS 10
77 #define RTE_NGBE_WAIT_100_US 100
/* NOTE(review): presumably the maximum number of mbuf segments accepted
 * per Tx packet -- confirm against the Tx prepare/burst code.
 */
79 #define NGBE_TX_MAX_SEG 40
/* Default tx_free_thresh when the application passes none; the #ifndef
 * guard allows a build-time override.
 */
81 #ifndef DEFAULT_TX_FREE_THRESH
82 #define DEFAULT_TX_FREE_THRESH 32
86 * Structure associated with each descriptor of the Rx ring of a Rx queue.
88 struct ngbe_rx_entry {
89 struct rte_mbuf *mbuf; /**< mbuf associated with Rx descriptor. */
/** Per-descriptor state for the scattered (multi-segment) Rx path. */
92 struct ngbe_scattered_rx_entry {
93 struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
97 * Structure associated with each descriptor of the Tx ring of a Tx queue.
99 struct ngbe_tx_entry {
100 struct rte_mbuf *mbuf; /**< mbuf associated with Tx desc, if any. */
101 uint16_t next_id; /**< Index of next descriptor in ring. */
/* NOTE(review): for multi-segment packets this likely records the index of
 * the chain's final descriptor so that completion handling can free all
 * segments at once -- confirm in the Tx cleanup code.
 */
102 uint16_t last_id; /**< Index of last scattered descriptor. */
106 * Structure associated with each Rx queue.
108 struct ngbe_rx_queue {
109 struct rte_mempool *mb_pool; /**< mbuf pool to populate Rx ring */
110 uint64_t rx_ring_phys_addr; /**< Rx ring DMA address */
111 volatile uint32_t *rdt_reg_addr; /**< RDT register address */
112 volatile uint32_t *rdh_reg_addr; /**< RDH register address */
114 volatile struct ngbe_rx_desc *rx_ring; /**< Rx ring virtual address */
115 /** address of Rx software ring */
116 struct ngbe_rx_entry *sw_ring;
117 /** address of scattered Rx software ring */
118 struct ngbe_scattered_rx_entry *sw_sc_ring;
120 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet */
121 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet */
122 uint16_t nb_rx_desc; /**< number of Rx descriptors */
123 uint16_t rx_tail; /**< current value of RDT register */
124 uint16_t nb_rx_hold; /**< number of held free Rx desc */
/* State for the staged/burst Rx path (packets parked in rx_stage[] below
 * before being handed to the application).
 */
126 uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
127 uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
128 uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
130 uint16_t rx_free_thresh; /**< max free Rx desc to hold */
131 uint16_t queue_id; /**< RX queue index */
132 uint16_t reg_idx; /**< RX queue register index */
133 uint16_t port_id; /**< Device port identifier */
134 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En */
135 uint8_t rx_deferred_start; /**< not in global dev start */
136 /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
137 struct rte_mbuf fake_mbuf;
138 /** hold packets to return to application */
/* NOTE(review): sized 2 * RX_MAX_BURST, presumably so a fresh burst can be
 * staged while previously staged packets are still pending -- confirm.
 */
139 struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
146 NGBE_CTX_0 = 0, /**< CTX0 */
147 NGBE_CTX_1 = 1, /**< CTX1 */
/* Count sentinel: number of hardware Tx contexts; used to size
 * ngbe_tx_queue::ctx_cache[].
 */
148 NGBE_CTX_NUM = 2, /**< CTX NUMBER */
152 * Structure to check if a new context needs to be built
154 struct ngbe_ctx_info {
155 uint64_t flags; /**< ol_flags for context build. */
159 * Structure associated with each Tx queue.
161 struct ngbe_tx_queue {
162 /** Tx ring virtual address */
163 volatile struct ngbe_tx_desc *tx_ring;
165 uint64_t tx_ring_phys_addr; /**< Tx ring DMA address */
166 struct ngbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD */
167 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register */
168 volatile uint32_t *tdc_reg_addr; /**< Address of TDC register */
169 uint16_t nb_tx_desc; /**< number of Tx descriptors */
170 uint16_t tx_tail; /**< current value of TDT reg */
172 * Start freeing Tx buffers if there are less free descriptors than
175 uint16_t tx_free_thresh;
176 /** Index to last Tx descriptor to have been cleaned */
177 uint16_t last_desc_cleaned;
178 /** Total number of Tx descriptors ready to be allocated */
180 uint16_t tx_next_dd; /**< next desc to scan for DD bit */
181 uint16_t queue_id; /**< Tx queue index */
182 uint16_t reg_idx; /**< Tx queue register index */
183 uint16_t port_id; /**< Device port identifier */
184 uint8_t pthresh; /**< Prefetch threshold register */
185 uint8_t hthresh; /**< Host threshold register */
186 uint8_t wthresh; /**< Write-back threshold reg */
/* NOTE(review): ctx_curr presumably selects the active slot in ctx_cache[]
 * (NGBE_CTX_0 / NGBE_CTX_1) -- confirm in the Tx context-build code.
 */
187 uint32_t ctx_curr; /**< Hardware context states */
188 /** Hardware context0 history */
189 struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM];
190 uint8_t tx_deferred_start; /**< not in global dev start */
192 const struct ngbe_txq_ops *ops; /**< txq ops */
/** Table of Tx-queue teardown/reset callbacks; each takes the queue. */
195 struct ngbe_txq_ops {
196 void (*release_mbufs)(struct ngbe_tx_queue *txq);
197 void (*free_swring)(struct ngbe_tx_queue *txq);
198 void (*reset)(struct ngbe_tx_queue *txq);
201 #endif /* _NGBE_RXTX_H_ */