1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
/* Number of EMAC TX queues; assigned from EMAC_TXQ_CNT in pfe_hif_lib_init(). */
8 unsigned int emac_txq_cnt;
12 * Common functions used by HIF client drivers
/* HIF shared memory, statically allocated; published via pfe->hif.shm at init. */
15 /*HIF shared memory Global variable */
16 struct hif_shm ghif_shm;
18 /* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
19 * This function should be called after pfe_hif_exit
21 * @param[in] hif_shm Shared memory address location in DDR
24 pfe_hif_shm_clean(struct hif_shm *hif_shm)
/* Walk every slot of the pre-filled RX buffer pool and return the mbufs
 * to their mempool. NOTE(review): the guard between fetching the slot and
 * freeing it is not visible in this excerpt — presumably a NULL check.
 */
29 	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
30 		pkt = hif_shm->rx_buf_pool[i];
32 			rte_pktmbuf_free((struct rte_mbuf *)pkt);
36 /* Initialize shared memory used between HIF driver and clients,
37 * allocate rx_buffer_pool required for HIF Rx descriptors.
38 * This function should be called before initializing HIF driver.
40 * @param[in] hif_shm Shared memory address location in DDR
 * @param[in] mb_pool mempool the RX buffer pool is pre-filled from
41 * @return 0 - on success, <0 on fail to initialize
44 pfe_hif_shm_init(struct hif_shm *hif_shm, struct rte_mempool *mb_pool)
47 	struct rte_mbuf *mbuf;
/* Start from a clean slate; pool size is fixed to the HIF RX ring depth. */
49 	memset(hif_shm, 0, sizeof(struct hif_shm));
50 	hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
52 	for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
/* NOTE(review): rte_cpu_to_le_64() applied to a pointer value — this
 * byte-swaps the pointer on big-endian hosts; verify intent.
 */
53 		mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(mb_pool));
55 		hif_shm->rx_buf_pool[i] = mbuf;
/* Allocation failure path: free whatever was pre-filled so far. */
63 	PFE_PMD_ERR("Low memory");
64 	pfe_hif_shm_clean(hif_shm);
68 /*This function sends indication to HIF driver
70 * @param[in] hif hif context
 * @param[in] req request code (e.g. REQUEST_CL_REGISTER/UNREGISTER)
 * @param[in] data1 first request argument (typically the client id)
73 hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
/* Thin wrapper: forward the client request straight to the HIF driver. */
76 	hif_process_client_req(hif, req, data1, data2);
/* Deliver an event to a client, edge-triggered: the handler is invoked only
 * on the 0->1 transition of the per-queue bit in queue_mask[event_type];
 * repeated indications while the bit is set are suppressed.
 */
80 hif_lib_indicate_client(struct hif_client_s *client, int event_type,
/* Validate client pointer, event type, and queue number before use. */
83 	if (!client || event_type >= HIF_EVENT_MAX ||
84 	    qno >= HIF_CLIENT_QUEUES_MAX)
87 	if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
88 		client->event_handler(client->priv, event_type, qno);
91 /*This function releases Rx queue descriptors memory and pre-filled buffers
93 * @param[in] client hif_client context
96 hif_lib_client_release_rx_buffers(struct hif_client_s *client)
98 	struct rte_mempool *pool;
99 	struct rte_pktmbuf_pool_private *mb_priv;
100 	struct rx_queue_desc *desc;
101 	unsigned int qno, ii;
/* mempool private data is needed to recover the mbuf start address below. */
104 	pool = client->pfe->hif.shm->pool;
105 	mb_priv = rte_mempool_get_priv(pool);
106 	for (qno = 0; qno < client->rx_qn; qno++) {
107 		desc = client->rx_q[qno].base;
109 		for (ii = 0; ii < client->rx_q[qno].size; ii++) {
110 			buf = (void *)desc->data;
112 			/* Data pointer to mbuf pointer calculation:
113 			 * "Data - User private data - headroom - mbufsize"
114 			 * Actual data pointer given to HIF BDs was
115 			 * "mbuf->data_offset - PFE_PKT_HEADER_SZ"
117 			buf = buf + PFE_PKT_HEADER_SZ
118 			- sizeof(struct rte_mbuf)
119 			- RTE_PKTMBUF_HEADROOM
120 			- mb_priv->mbuf_priv_size;
121 			rte_pktmbuf_free((struct rte_mbuf *)buf);
/* Finally release the descriptor array itself. */
127 	rte_free(client->rx_qbase);
130 /*This function allocates memory for the rxq descriptors and pre-fill rx queues
132 * @param[in] client client context
133 * @param[in] q_size size of the rxQ, all queues are of same size
136 hif_lib_client_init_rx_buffers(struct hif_client_s *client,
139 	struct rx_queue_desc *desc;
140 	struct hif_client_rx_queue *queue;
141 	unsigned int ii, qno;
143 	/*Allocate memory for the client queues */
/* One contiguous, cache-line-aligned allocation holds all rx_qn rings. */
144 	client->rx_qbase = rte_malloc(NULL, client->rx_qn * q_size *
145 	sizeof(struct rx_queue_desc), RTE_CACHE_LINE_SIZE);
146 	if (!client->rx_qbase)
/* First pass: carve the shared allocation into per-queue rings and
 * initialize the software bookkeeping fields.
 */
149 	for (qno = 0; qno < client->rx_qn; qno++) {
150 		queue = &client->rx_q[qno];
152 		queue->base = client->rx_qbase + qno * q_size * sizeof(struct
154 		queue->size = q_size;
156 		queue->write_idx = 0;
158 		queue->port_id = client->port_id;
159 		queue->priv = client->priv;
160 		PFE_PMD_DEBUG("rx queue: %d, base: %p, size: %d\n", qno,
161 		queue->base, queue->size);
/* Second pass: hand every descriptor to the HIF by marking it owned. */
164 	for (qno = 0; qno < client->rx_qn; qno++) {
165 		queue = &client->rx_q[qno];
168 		for (ii = 0; ii < queue->size; ii++) {
169 			desc->ctrl = CL_DESC_OWN;
/* Sanity check a TX queue at teardown: it should have been drained already. */
182 hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
185 	 * Check if there are any pending packets. Client must flush the tx
186 	 * queues before unregistering, by calling
187 	 * hif_lib_tx_get_next_complete()
189 	 * Hif no longer calls since we are no longer registered
/* Only report the leak — nothing can be reclaimed safely at this point. */
191 	if (queue->tx_pending)
192 		PFE_PMD_ERR("pending transmit packet");
/* Release all TX queue resources for a client: warn about any undrained
 * queues, then free the shared descriptor allocation.
 */
196 hif_lib_client_release_tx_buffers(struct hif_client_s *client)
200 	for (qno = 0; qno < client->tx_qn; qno++)
201 		hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
203 	rte_free(client->tx_qbase);
/* Allocate and initialize the TX queue descriptor rings for a client.
 * All tx_qn queues share one contiguous allocation; all are q_size deep.
 * Returns 0 on success, non-zero when the allocation fails (exact error
 * value not visible in this excerpt).
 */
207 hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
210 	struct hif_client_tx_queue *queue;
213 	client->tx_qbase = rte_malloc(NULL, client->tx_qn * q_size *
214 	sizeof(struct tx_queue_desc), RTE_CACHE_LINE_SIZE);
215 	if (!client->tx_qbase)
/* Carve the allocation into per-queue rings and zero the TX accounting
 * (pending count and TMU completion counters).
 */
218 	for (qno = 0; qno < client->tx_qn; qno++) {
219 		queue = &client->tx_q[qno];
221 		queue->base = client->tx_qbase + qno * q_size * sizeof(struct
223 		queue->size = q_size;
225 		queue->write_idx = 0;
226 		queue->tx_pending = 0;
227 		queue->nocpy_flag = 0;
228 		queue->prev_tmu_tx_pkts = 0;
229 		queue->done_tmu_tx_pkts = 0;
230 		queue->priv = client->priv;
232 		queue->port_id = client->port_id;
234 		PFE_PMD_DEBUG("tx queue: %d, base: %p, size: %d", qno,
235 		queue->base, queue->size);
/* No-op event handler, installed at registration time for clients that did
 * not supply their own, so event delivery never calls a NULL pointer.
 */
242 hif_lib_event_dummy(__rte_unused void *priv,
243 		    __rte_unused int event_type, __rte_unused int qno)
/* Register a HIF client: allocate its RX/TX rings, publish them in the
 * shared memory slot for its id, and notify the HIF driver. On any failure
 * the rings are released again (error paths at the bottom).
 */
249 hif_lib_client_register(struct hif_client_s *client)
251 	struct hif_shm *hif_shm;
252 	struct hif_client_shm *client_shm;
255 	PMD_INIT_FUNC_TRACE();
257 	/*Allocate memory before spin_lock*/
258 	if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
263 	if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
/* From here on the hif lock serializes against other register/unregister
 * callers; reject invalid ids and double registration.
 */
268 	rte_spinlock_lock(&client->pfe->hif.lock);
269 	if (!(client->pfe) || client->id >= HIF_CLIENTS_MAX ||
270 	    client->pfe->hif_client[client->id]) {
275 	hif_shm = client->pfe->hif.shm;
/* Fall back to the dummy handler so events can always be dispatched. */
277 	if (!client->event_handler)
278 		client->event_handler = hif_lib_event_dummy;
280 	/*Initialize client specific shared memory */
281 	client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
282 	client_shm->rx_qbase = (unsigned long)client->rx_qbase;
283 	client_shm->rx_qsize = client->rx_qsize;
284 	client_shm->tx_qbase = (unsigned long)client->tx_qbase;
285 	client_shm->tx_qsize = client->tx_qsize;
286 	client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
287 	(client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
289 	for (i = 0; i < HIF_EVENT_MAX; i++) {
290 		client->queue_mask[i] = 0; /*
291 		* By default all events are
296 	/*Indicate to HIF driver*/
297 	hif_lib_indicate_hif(&client->pfe->hif, REQUEST_CL_REGISTER,
300 	PFE_PMD_DEBUG("client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d",
301 	client, client->id, client->tx_qsize, client->rx_qsize);
/* Success: make the client visible to the HIF dispatcher. */
305 	client->pfe->hif_client[client->id] = client;
306 	rte_spinlock_unlock(&client->pfe->hif.lock);
/* Error unwind: drop the lock, then free TX and (further down) RX rings. */
311 	rte_spinlock_unlock(&client->pfe->hif.lock);
312 	hif_lib_client_release_tx_buffers(client);
315 	hif_lib_client_release_rx_buffers(client);
/* Unregister a HIF client: tell the HIF driver first (under the hif lock),
 * then free the client's TX/RX rings and clear its dispatcher slot.
 */
322 hif_lib_client_unregister(struct hif_client_s *client)
324 	struct pfe *pfe = client->pfe;
325 	u32 client_id = client->id;
327 	PFE_PMD_INFO("client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d",
328 	client, client->id, client->tx_qsize, client->rx_qsize);
330 	rte_spinlock_lock(&pfe->hif.lock);
331 	hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
333 	hif_lib_client_release_tx_buffers(client);
334 	hif_lib_client_release_rx_buffers(client);
335 	pfe->hif_client[client_id] = NULL;
336 	rte_spinlock_unlock(&pfe->hif.lock);
/* Re-arm event delivery for (event, qno): clear the edge-trigger bit and, for
 * RX, immediately re-indicate if a packet is already waiting (descriptor not
 * owned by HIF) so no wakeup is lost between handler runs.
 */
342 hif_lib_event_handler_start(struct hif_client_s *client, int event,
/* NOTE(review): desc is computed from qno before the bounds check below —
 * an out-of-range qno indexes rx_q[] out of bounds here; verify callers.
 */
345 	struct hif_client_rx_queue *queue = &client->rx_q[qno];
346 	struct rx_queue_desc *desc = queue->base + queue->read_idx;
348 	if (event >= HIF_EVENT_MAX || qno >= HIF_CLIENT_QUEUES_MAX) {
349 		PFE_PMD_WARN("Unsupported event : %d queue number : %d",
354 	test_and_clear_bit(qno, &client->queue_mask[event]);
357 	case EVENT_RX_PKT_IND:
358 		if (!(desc->ctrl & CL_DESC_OWN))
359 			hif_lib_indicate_client(client,
360 			EVENT_RX_PKT_IND, qno);
363 	case EVENT_HIGH_RX_WM:
364 	case EVENT_TXDONE_IND:
372 #ifdef RTE_LIBRTE_PFE_SW_PARSE
/* Software packet-type classification (used when the hardware parser is not
 * relied upon): fills mbuf->packet_type and the L2/L3 header lengths from
 * rte_net_get_ptype().
 */
374 pfe_sw_parse_pkt(struct rte_mbuf *mbuf)
376 	struct rte_net_hdr_lens hdr_lens;
378 	mbuf->packet_type = rte_net_get_ptype(mbuf, &hdr_lens,
379 	RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK
380 	| RTE_PTYPE_L4_MASK);
381 	mbuf->l2_len = hdr_lens.l2_len;
382 	mbuf->l3_len = hdr_lens.l3_len;
387 * This function gets one packet from the specified client queue
388 * It also refills the rx buffer
391 hif_lib_receive_pkt(struct hif_client_rx_queue *queue,
392 		    struct rte_mempool *pool, struct rte_mbuf **rx_pkts,
395 	struct rx_queue_desc *desc;
396 	struct pfe_eth_priv_s *priv = queue->priv;
397 	struct rte_pktmbuf_pool_private *mb_priv;
398 	struct rte_mbuf *mbuf, *p_mbuf = NULL, *first_mbuf = NULL;
399 	struct rte_eth_stats *stats = &priv->stats;
400 	int i, wait_for_last = 0;
401 #ifndef RTE_LIBRTE_PFE_SW_PARSE
402 	struct pfe_parse *parse_res;
405 	for (i = 0; i < nb_pkts;) {
407 		desc = queue->base + queue->read_idx;
/* Descriptor still owned by HIF -> nothing more to receive this burst. */
408 		if ((desc->ctrl & CL_DESC_OWN)) {
409 			stats->ipackets += i;
413 		mb_priv = rte_mempool_get_priv(pool);
/* Recover the mbuf start from the raw DMA data pointer: step back over the
 * HIF packet header, mbuf struct, headroom, and mempool private area
 * (mirror of the address handed to the HIF BDs at fill time).
 */
415 		mbuf = desc->data + PFE_PKT_HEADER_SZ
416 		- sizeof(struct rte_mbuf)
417 		- RTE_PKTMBUF_HEADROOM
418 		- mb_priv->mbuf_priv_size;
/* First segment of a (possibly multi-segment) packet. */
420 		if (desc->ctrl & CL_DESC_FIRST) {
421 			/* TODO size of priv data if present in
425 			mbuf->pkt_len = CL_DESC_BUF_LEN(desc->ctrl)
426 			- PFE_PKT_HEADER_SZ - size;
427 			mbuf->data_len = mbuf->pkt_len;
428 			mbuf->port = queue->port_id;
429 #ifdef RTE_LIBRTE_PFE_SW_PARSE
430 			pfe_sw_parse_pkt(mbuf);
/* HW parse results are prepended in the buffer ahead of the packet data. */
432 			parse_res = (struct pfe_parse *)(desc->data +
434 			mbuf->packet_type = parse_res->packet_type;
438 			rx_pkts[i++] = first_mbuf;
/* Continuation segment: chain onto first_mbuf and grow pkt_len/nb_segs. */
440 			mbuf->data_len = CL_DESC_BUF_LEN(desc->ctrl);
441 			mbuf->data_off = mbuf->data_off -
443 			first_mbuf->pkt_len += mbuf->data_len;
444 			first_mbuf->nb_segs++;
447 		stats->ibytes += mbuf->data_len;
450 		if (desc->ctrl & CL_DESC_LAST)
455 		 * Needed so we don't free a buffer/page
456 		 * twice on module_exit
461 		 * Ensure everything else is written to DDR before
/* Hand the descriptor back to HIF and advance the (power-of-two) ring. */
466 		desc->ctrl = CL_DESC_OWN;
467 		queue->read_idx = (queue->read_idx + 1) &
469 	} while (wait_for_last);
471 	stats->ipackets += i;
/* Write the HIF header (client id, queue number, client control word) in
 * front of a TX packet, minimizing the number of stores.
 */
476 hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
477 	      client_id, unsigned int qno,
480 	/* Optimize the write since the destination may be non-cacheable */
/* 4-byte aligned destination: one 32-bit store covers the whole header. */
481 	if (!((unsigned long)pkt_hdr & 0x3)) {
482 		((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
/* Unaligned: fall back to two 16-bit stores with the same layout. */
485 		((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
486 		((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
490 /*This function puts the given packet in the specific client queue */
492 hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno,
493 		 void *data, void *data1, unsigned int len,
494 		 u32 client_ctrl, unsigned int flags, void *client_data)
496 	struct hif_client_tx_queue *queue = &client->tx_q[qno];
497 	struct tx_queue_desc *desc = queue->base + queue->write_idx;
/* For the first buffer of a packet, back up over the reserved HIF header
 * area and write the header there. (Arithmetic on void * is a GCC
 * extension the surrounding code relies on.)
 */
500 	if (flags & HIF_FIRST_BUFFER) {
501 		data1 -= PFE_HIF_SIZE;
502 		data -= PFE_HIF_SIZE;
505 		hif_hdr_write(data1, client->id, qno, client_ctrl);
/* Record completion info in the software descriptor, then hand the buffer
 * to the HIF driver and advance the (power-of-two) write index.
 */
508 	desc->data = client_data;
509 	desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
511 	hif_xmit_pkt(&client->pfe->hif, client->id, qno, data, len, flags);
513 	queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
/* Reap the next completed TX buffer for (client, qno) and report its flags
 * through *flags. Returns NULL-equivalent (not visible here) when nothing is
 * pending or the head descriptor is still owned by HIF.
 */
519 hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
520 			     unsigned int *flags, __rte_unused int count)
522 	struct hif_client_tx_queue *queue = &client->tx_q[qno];
523 	struct tx_queue_desc *desc = queue->base + queue->read_idx;
525 	PFE_DP_LOG(DEBUG, "qno : %d rd_indx: %d pending:%d",
526 		   qno, queue->read_idx, queue->tx_pending);
528 	if (!queue->tx_pending)
/* Zero-copy mode: refresh the completed-packet budget from the TMU counter,
 * handling 32-bit counter wrap-around.
 */
531 	if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
534 		if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
535 			queue->done_tmu_tx_pkts = UINT_MAX -
536 			queue->prev_tmu_tx_pkts + tmu_tx_pkts;
538 			queue->done_tmu_tx_pkts = tmu_tx_pkts -
539 			queue->prev_tmu_tx_pkts;
541 		queue->prev_tmu_tx_pkts = tmu_tx_pkts;
543 		if (!queue->done_tmu_tx_pkts)
/* Head descriptor still owned by HIF -> not complete yet. */
547 	if (desc->ctrl & CL_DESC_OWN)
550 	queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
553 	*flags = CL_DESC_GET_FLAGS(desc->ctrl);
/* Only the last buffer of a packet consumes one unit of the TMU budget. */
555 	if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
556 		queue->done_tmu_tx_pkts--;
/* One-time library init: record the EMAC TX queue count and point the HIF
 * at the statically allocated shared-memory block.
 */
562 pfe_hif_lib_init(struct pfe *pfe)
564 	PMD_INIT_FUNC_TRACE();
566 	emac_txq_cnt = EMAC_TXQ_CNT;
567 	pfe->hif.shm = &ghif_shm;
573 pfe_hif_lib_exit(__rte_unused struct pfe *pfe)
575 PMD_INIT_FUNC_TRACE();