1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2018-2021 Beijing WangXun Technology Co., Ltd.
3 * Copyright(c) 2010-2017 Intel Corporation
9 /*****************************************************************************
11 *****************************************************************************/
28 } qw0; /* also as r.pkt_addr */
43 } qw1; /* also as r.hdr_addr */
46 /*****************************************************************************
48 *****************************************************************************/
50 * Transmit Context Descriptor (NGBE_TXD_TYP=CTXT)
52 struct ngbe_tx_ctx_desc {
53 rte_le32_t dw0; /* w.vlan_macip_lens */
54 rte_le32_t dw1; /* w.seqnum_seed */
55 rte_le32_t dw2; /* w.type_tucmd_mlhl */
56 rte_le32_t dw3; /* w.mss_l4len_idx */
59 /* @ngbe_tx_ctx_desc.dw3 */
/* NOTE(review): DD = "descriptor done" status bit at bit position 0 of dw3.
 * MS() is presumably a mask/shift helper macro defined elsewhere in this
 * driver — confirm its definition before relying on the exact encoding.
 */
60 #define NGBE_TXD_DD MS(0, 0x1) /* descriptor done */
63 * Transmit Data Descriptor (NGBE_TXD_TYP=DATA)
66 rte_le64_t qw0; /* r.buffer_addr , w.reserved */
67 rte_le32_t dw2; /* r.cmd_type_len, w.nxtseq_seed */
68 rte_le32_t dw3; /* r.olinfo_status, w.status */
71 #define RTE_PMD_NGBE_RX_MAX_BURST 32
/* Byte size of the Rx descriptor ring: NGBE_RING_DESC_MAX descriptors plus
 * one extra burst's worth of descriptors as headroom for the burst Rx path.
 */
73 #define RX_RING_SZ ((NGBE_RING_DESC_MAX + RTE_PMD_NGBE_RX_MAX_BURST) * \
74 sizeof(struct ngbe_rx_desc))
76 #define NGBE_TX_MAX_SEG 40
/* Default Tx free threshold; guarded with #ifndef so the value can be
 * overridden at build time.
 */
78 #ifndef DEFAULT_TX_FREE_THRESH
79 #define DEFAULT_TX_FREE_THRESH 32
83 * Structure associated with each descriptor of the Rx ring of a Rx queue.
85 struct ngbe_rx_entry {
86 struct rte_mbuf *mbuf; /**< mbuf associated with Rx descriptor. */
89 struct ngbe_scattered_rx_entry {
90 struct rte_mbuf *fbuf; /**< First segment of the fragmented packet. */
94 * Structure associated with each descriptor of the Tx ring of a Tx queue.
96 struct ngbe_tx_entry {
97 struct rte_mbuf *mbuf; /**< mbuf associated with Tx desc, if any. */
98 uint16_t next_id; /**< Index of next descriptor in ring. */
99 uint16_t last_id; /**< Index of last scattered descriptor. */
103 * Structure associated with each Rx queue.
105 struct ngbe_rx_queue {
106 struct rte_mempool *mb_pool; /**< mbuf pool to populate Rx ring */
107 uint64_t rx_ring_phys_addr; /**< Rx ring DMA address */
108 volatile uint32_t *rdt_reg_addr; /**< RDT register address */
109 volatile uint32_t *rdh_reg_addr; /**< RDH register address */
111 volatile struct ngbe_rx_desc *rx_ring; /**< Rx ring virtual address */
112 /** address of Rx software ring */
113 struct ngbe_rx_entry *sw_ring;
114 /** address of scattered Rx software ring */
115 struct ngbe_scattered_rx_entry *sw_sc_ring;
117 struct rte_mbuf *pkt_first_seg; /**< First segment of current packet */
118 struct rte_mbuf *pkt_last_seg; /**< Last segment of current packet */
119 uint16_t nb_rx_desc; /**< number of Rx descriptors */
120 uint16_t rx_tail; /**< current value of RDT register */
121 uint16_t nb_rx_hold; /**< number of held free Rx desc */
123 uint16_t rx_nb_avail; /**< nr of staged pkts ready to ret to app */
124 uint16_t rx_next_avail; /**< idx of next staged pkt to ret to app */
125 uint16_t rx_free_trigger; /**< triggers rx buffer allocation */
127 uint16_t rx_free_thresh; /**< max free Rx desc to hold */
128 uint16_t queue_id; /**< RX queue index */
129 uint16_t reg_idx; /**< RX queue register index */
130 uint16_t port_id; /**< Device port identifier */
131 uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En */
132 uint8_t rx_deferred_start; /**< not in global dev start */
133 /** need to alloc dummy mbuf, for wraparound when scanning hw ring */
134 struct rte_mbuf fake_mbuf;
135 /** hold packets to return to application */
136 struct rte_mbuf *rx_stage[RTE_PMD_NGBE_RX_MAX_BURST * 2];
143 NGBE_CTX_0 = 0, /**< CTX0 */
144 NGBE_CTX_1 = 1, /**< CTX1 */
145 NGBE_CTX_NUM = 2, /**< CTX NUMBER */
149 * Structure to check if new context need be built
151 struct ngbe_ctx_info {
152 uint64_t flags; /**< ol_flags for context build. */
156 * Structure associated with each Tx queue.
158 struct ngbe_tx_queue {
159 /** Tx ring virtual address */
160 volatile struct ngbe_tx_desc *tx_ring;
162 uint64_t tx_ring_phys_addr; /**< Tx ring DMA address */
163 struct ngbe_tx_entry *sw_ring; /**< address of SW ring for scalar PMD */
164 volatile uint32_t *tdt_reg_addr; /**< Address of TDT register */
165 volatile uint32_t *tdc_reg_addr; /**< Address of TDC register */
166 uint16_t nb_tx_desc; /**< number of Tx descriptors */
167 uint16_t tx_tail; /**< current value of TDT reg */
169 * Start freeing Tx buffers if there are less free descriptors than
172 uint16_t tx_free_thresh;
173 /** Index to last Tx descriptor to have been cleaned */
174 uint16_t last_desc_cleaned;
175 /** Total number of Tx descriptors ready to be allocated */
177 uint16_t tx_next_dd; /**< next desc to scan for DD bit */
178 uint16_t queue_id; /**< Tx queue index */
179 uint16_t reg_idx; /**< Tx queue register index */
180 uint16_t port_id; /**< Device port identifier */
181 uint8_t pthresh; /**< Prefetch threshold register */
182 uint8_t hthresh; /**< Host threshold register */
183 uint8_t wthresh; /**< Write-back threshold reg */
184 uint32_t ctx_curr; /**< Hardware context states */
185 /** Hardware context0 history */
186 struct ngbe_ctx_info ctx_cache[NGBE_CTX_NUM];
187 uint8_t tx_deferred_start; /**< not in global dev start */
189 const struct ngbe_txq_ops *ops; /**< txq ops */
192 struct ngbe_txq_ops {
193 void (*release_mbufs)(struct ngbe_tx_queue *txq);
194 void (*free_swring)(struct ngbe_tx_queue *txq);
195 void (*reset)(struct ngbe_tx_queue *txq);
198 #endif /* _NGBE_RXTX_H_ */