/* SPDX-License-Identifier: BSD-3-Clause */

unsigned int emac_txq_cnt;

/*
 * Common functions used by HIF client drivers
 */

/* HIF shared memory global variable */
struct hif_shm ghif_shm;
/* This function sends an indication to the HIF driver
 * @param[in] hif hif context
 */
hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int data2)
	hif_process_client_req(hif, req, data1, data2);

hif_lib_indicate_client(struct hif_client_s *client, int event_type,
			int qno)
	if (!client || event_type >= HIF_EVENT_MAX ||
	    qno >= HIF_CLIENT_QUEUES_MAX)

	if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
		client->event_handler(client->priv, event_type, qno);
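
/*
 * Illustrative only: a minimal sketch of a client event handler as
 * wired through client->event_handler above. The int return type and
 * the notify_rx_ready() helper are assumptions, not part of this
 * driver. The handler fires once per event; it stays masked until the
 * client re-arms it via hif_lib_event_handler_start().
 */
static int
example_event_handler(void *priv, int event_type, int qno)
{
	/* "priv" is whatever the client stored in client->priv */
	if (event_type == EVENT_RX_PKT_IND)
		notify_rx_ready(priv, qno);	/* hypothetical helper */
	return 0;
}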
/* This function releases Rx queue descriptors memory and pre-filled buffers
 * @param[in] client hif_client context
 */
hif_lib_client_release_rx_buffers(struct hif_client_s *client)
	struct rte_mempool *pool;
	struct rte_pktmbuf_pool_private *mb_priv;
	struct rx_queue_desc *desc;

	pool = client->pfe->hif.shm->pool;
	mb_priv = rte_mempool_get_priv(pool);
	for (qno = 0; qno < client->rx_qn; qno++) {
		desc = client->rx_q[qno].base;

		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
			buf = (void *)desc->data;

			/* Data pointer to mbuf pointer calculation:
			 * "data - user private data - headroom - mbuf size"
			 * The actual data pointer given to the HIF BDs was
			 * "mbuf->data_offset - PFE_PKT_HEADER_SZ"
			 */
			buf = buf + PFE_PKT_HEADER_SZ
				- sizeof(struct rte_mbuf)
				- RTE_PKTMBUF_HEADROOM
				- mb_priv->mbuf_priv_size;
			rte_pktmbuf_free((struct rte_mbuf *)buf);
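			/*
			 * Worked example with assumed (not driver-sourced)
			 * values: sizeof(struct rte_mbuf) == 128,
			 * RTE_PKTMBUF_HEADROOM == 128, mbuf_priv_size == 0,
			 * PFE_PKT_HEADER_SZ == 16. A desc->data of 0x1000
			 * then recovers 0x1000 + 16 - 128 - 128 - 0 = 0xf10
			 * as the struct rte_mbuf address that gets freed.
			 */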
	rte_free(client->rx_qbase);
/* This function allocates memory for the rxq descriptors and pre-fills rx queues
 * @param[in] client client context
 * @param[in] q_size size of the rxQ; all queues are of the same size
 */
hif_lib_client_init_rx_buffers(struct hif_client_s *client, int q_size)
	struct rx_queue_desc *desc;
	struct hif_client_rx_queue *queue;

	/* Allocate memory for the client queues */
	client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size *
			sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE);
	if (!client->rx_qbase)

	for (qno = 0; qno < client->rx_qn; qno++) {
		queue = &client->rx_q[qno];

		queue->base = client->rx_qbase + qno * q_size *
				sizeof(struct rx_queue_desc);
		queue->size = q_size;
		queue->write_idx = 0;
		queue->port_id = client->port_id;
		queue->priv = client->priv;
		PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d", qno,
			      queue->base, queue->size);
	for (qno = 0; qno < client->rx_qn; qno++) {
		queue = &client->rx_q[qno];

		for (ii = 0; ii < queue->size; ii++) {
			desc->ctrl = CL_DESC_OWN;
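			/*
			 * Added note: CL_DESC_OWN appears to mark the
			 * descriptor as owned by the HIF side; the RX path
			 * treats a cleared OWN bit as "a packet is available
			 * to the client" (see hif_lib_event_handler_start()).
			 */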
hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
	/*
	 * Check if there are any pending packets. The client must flush the
	 * tx queues before unregistering by calling
	 * hif_lib_tx_get_next_complete().
	 *
	 * HIF no longer calls us since we are no longer registered.
	 */
	if (queue->tx_pending)
		PFE_PMD_ERR("pending transmit packet");
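
/*
 * Illustrative only: the flush loop a client is expected to run per
 * TX queue before unregistering. Assumes hif_lib_tx_get_next_complete()
 * returns the completed buffer (NULL when nothing is left) and that a
 * hypothetical free_tx_buffer() helper releases it.
 */
static void
example_flush_tx_queue(struct hif_client_s *client, int qno)
{
	unsigned int flags;
	void *buf;

	/* Drain completed descriptors until nothing is pending */
	while ((buf = hif_lib_tx_get_next_complete(client, qno,
						   &flags, 1)) != NULL)
		free_tx_buffer(buf);	/* hypothetical helper */
}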
hif_lib_client_release_tx_buffers(struct hif_client_s *client)
	for (qno = 0; qno < client->tx_qn; qno++)
		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);

	rte_free(client->tx_qbase);
hif_lib_client_init_tx_buffers(struct hif_client_s *client, int q_size)
	struct hif_client_tx_queue *queue;

	client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size *
			sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE);
	if (!client->tx_qbase)

	for (qno = 0; qno < client->tx_qn; qno++) {
		queue = &client->tx_q[qno];

		queue->base = client->tx_qbase + qno * q_size *
				sizeof(struct tx_queue_desc);
		queue->size = q_size;
		queue->write_idx = 0;
		queue->tx_pending = 0;
		queue->nocpy_flag = 0;
		queue->prev_tmu_tx_pkts = 0;
		queue->done_tmu_tx_pkts = 0;
		queue->priv = client->priv;

		queue->port_id = client->port_id;

		PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno,
			      queue->base, queue->size);
hif_lib_event_dummy(__rte_unused void *priv,
		    __rte_unused int event_type, __rte_unused int qno)
hif_lib_client_register(struct hif_client_s *client)
	struct hif_shm *hif_shm;
	struct hif_client_shm *client_shm;

	PMD_INIT_FUNC_TRACE();

	/* Allocate memory before taking the spinlock */
	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {

	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {

	rte_spinlock_lock(&client->pfe->hif.lock);
	if (!(client->pfe) || client->id >= HIF_CLIENTS_MAX ||
	    client->pfe->hif_client[client->id]) {

	hif_shm = client->pfe->hif.shm;

	if (!client->event_handler)
		client->event_handler = hif_lib_event_dummy;

	/* Initialize client specific shared memory */
	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
	client_shm->rx_qbase = (unsigned long)client->rx_qbase;
	client_shm->rx_qsize = client->rx_qsize;
	client_shm->tx_qbase = (unsigned long)client->tx_qbase;
	client_shm->tx_qsize = client->tx_qsize;
	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
				(client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);

	for (i = 0; i < HIF_EVENT_MAX; i++) {
		client->queue_mask[i] = 0; /*
					    * By default all events are
					    * disabled
					    */

	/* Indicate to HIF driver */
	hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER,
			     client->id, 0);

	PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d",
		      client, client->id, client->tx_qsize, client->rx_qsize);

	client->pfe->hif_client[client->id] = client;
	rte_spinlock_unlock(&client->pfe->hif.lock);

	rte_spinlock_unlock(&client->pfe->hif.lock);
	hif_lib_client_release_tx_buffers(client);

	hif_lib_client_release_rx_buffers(client);
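
/*
 * Illustrative only: roughly how a client might fill in the fields
 * that hif_lib_client_register() consumes. All values here (client
 * id, queue counts, ring depths) are assumptions, not taken from the
 * driver; a zero return from hif_lib_client_register() is assumed to
 * mean success.
 */
static int
example_register_client(struct pfe *pfe, struct hif_client_s *client)
{
	memset(client, 0, sizeof(*client));
	client->pfe = pfe;
	client->id = 0;				/* assumed client id */
	client->tx_qn = 1;			/* assumed queue counts */
	client->rx_qn = 1;
	client->tx_qsize = 256;			/* assumed ring depths */
	client->rx_qsize = 256;
	client->event_handler = example_event_handler;
	client->priv = NULL;

	return hif_lib_client_register(client);
}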
hif_lib_client_unregister(struct hif_client_s *client)
	struct pfe *pfe = client->pfe;
	u32 client_id = client->id;

	PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d",
		     client, client->id, client->tx_qsize, client->rx_qsize);

	rte_spinlock_lock(&pfe->hif.lock);
	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);

	hif_lib_client_release_tx_buffers(client);
	hif_lib_client_release_rx_buffers(client);
	pfe->hif_client[client_id] = NULL;
	rte_spinlock_unlock(&pfe->hif.lock);
hif_lib_event_handler_start(struct hif_client_s *client, int event,
			    int qno)
	struct hif_client_rx_queue *queue = &client->rx_q[qno];
	struct rx_queue_desc *desc = queue->base + queue->read_idx;

	if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) {
		PFE_PMD_WARN("Unsupported event: %d, queue number: %d",
			     event, qno);

	test_and_clear_bit(qno, &client->queue_mask[event]);

	switch (event) {
	case EVENT_RX_PKT_IND:
		if (!(desc->ctrl & CL_DESC_OWN))
			hif_lib_indicate_client(client,
						EVENT_RX_PKT_IND, qno);

	case EVENT_HIGH_RX_WM:
	case EVENT_TXDONE_IND:
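
/*
 * Illustrative only: once a client has drained its RX ring (e.g. from
 * the handler sketched earlier), it re-arms the event so that
 * hif_lib_indicate_client() can deliver it again. drain_rx_queue() is
 * a hypothetical helper.
 */
static void
example_rx_event_done(struct hif_client_s *client, int qno)
{
	drain_rx_queue(client, qno);	/* hypothetical helper */

	/* Clears the queue_mask bit and re-checks for late arrivals */
	hif_lib_event_handler_start(client, EVENT_RX_PKT_IND, qno);
}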
hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
			     unsigned int *flags, __rte_unused int count)
	struct hif_client_tx_queue *queue = &client->tx_q[qno];
	struct tx_queue_desc *desc = queue->base + queue->read_idx;

	PFE_DP_LOG(DEBUG, "qno: %d, rd_idx: %d, pending: %d",
		   qno, queue->read_idx, queue->tx_pending);

	if (!queue->tx_pending)

	if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
		if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
			queue->done_tmu_tx_pkts = UINT_MAX -
				queue->prev_tmu_tx_pkts + tmu_tx_pkts;
		else
			queue->done_tmu_tx_pkts = tmu_tx_pkts -
				queue->prev_tmu_tx_pkts;

		queue->prev_tmu_tx_pkts = tmu_tx_pkts;
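		/*
		 * Worked example (illustrative values): if the counter last
		 * read prev_tmu_tx_pkts == UINT_MAX - 5 and now reads
		 * tmu_tx_pkts == 7, it has wrapped, so the code computes
		 * done_tmu_tx_pkts = UINT_MAX - (UINT_MAX - 5) + 7 = 12.
		 */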
	if (!queue->done_tmu_tx_pkts)

	if (desc->ctrl & CL_DESC_OWN)

	queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);

	*flags = CL_DESC_GET_FLAGS(desc->ctrl);

	if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
		queue->done_tmu_tx_pkts--;
pfe_hif_lib_init(struct pfe *pfe)
	PMD_INIT_FUNC_TRACE();

	emac_txq_cnt = EMAC_TXQ_CNT;
	pfe->hif.shm = &ghif_shm;

pfe_hif_lib_exit(__rte_unused struct pfe *pfe)
	PMD_INIT_FUNC_TRACE();