1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018-2019 NXP
9 #include <sys/eventfd.h>
10 #include <arpa/inet.h>
/*
 * pfe_hif_alloc_descr - allocate the HIF descriptor rings.
 * Carves one zeroed, cache-line-aligned region big enough for both the
 * Rx and Tx rings, then records its virtual and PFE-visible physical
 * base addresses plus the ring sizes in @hif.
 * NOTE(review): this view is elided — the allocation-failure branch and
 * return value are not fully visible here.
 */
13 pfe_hif_alloc_descr(struct pfe_hif *hif)
18 PMD_INIT_FUNC_TRACE();
/* One allocation covers HIF_RX_DESC_NT Rx + HIF_TX_DESC_NT Tx descriptors */
20 addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
21 HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
23 PFE_PMD_ERR("Could not allocate buffer descriptors!");
/* Physical address is what the PFE hardware block is programmed with */
28 hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
29 hif->descr_baseaddr_v = addr;
30 hif->rx_ring_size = HIF_RX_DESC_NT;
31 hif->tx_ring_size = HIF_TX_DESC_NT;
/*
 * pfe_hif_free_descr - free the descriptor-ring memory allocated by
 * pfe_hif_alloc_descr(). The Rx and Tx rings share one allocation, so a
 * single rte_free() of the virtual base releases both.
 */
40 pfe_hif_free_descr(struct pfe_hif *hif)
42 PMD_INIT_FUNC_TRACE();
44 rte_free(hif->descr_baseaddr_v);
47 /* pfe_hif_release_buffers -
 * Walk the Rx ring and hand every buffer still owned by the hardware
 * back to the shared-memory rx_buf_pool[] as an rte_mbuf pointer, then
 * clear the descriptor fields so the ring is quiescent.
 * NOTE(review): elided view — desc initialization/advance and loop
 * braces are not visible here.
 */
49 pfe_hif_release_buffers(struct pfe_hif *hif)
51 struct hif_desc *desc;
53 struct rte_mbuf *mbuf;
54 struct rte_pktmbuf_pool_private *mb_priv;
56 hif->rx_base = hif->descr_baseaddr_v;
/* Pool private data gives mbuf_priv_size, needed to walk back from the
 * buffer data address to the enclosing rte_mbuf header below.
 */
60 mb_priv = rte_mempool_get_priv(hif->shm->pool);
61 for (i = 0; i < hif->rx_ring_size; i++) {
62 if (readl(&desc->data)) {
63 if (i < hif->shm->rx_buf_pool_cnt &&
64 !hif->shm->rx_buf_pool[i]) {
/* Reverse of the mapping done in pfe_hif_init_buffers():
 * rx_buf_vaddr[i] points PFE_PKT_HEADER_SZ before the data area,
 * so step back over headroom, priv area and the mbuf struct itself
 * to recover the rte_mbuf pointer.
 */
65 mbuf = hif->rx_buf_vaddr[i] + PFE_PKT_HEADER_SZ
66 - sizeof(struct rte_mbuf)
67 - RTE_PKTMBUF_HEADROOM
68 - mb_priv->mbuf_priv_size;
69 hif->shm->rx_buf_pool[i] = mbuf;
/* Scrub the descriptor so hardware no longer references the buffer */
72 writel(0, &desc->data);
73 writel(0, &desc->status);
74 writel(0, &desc->ctrl);
80 * pfe_hif_init_buffers
81 * This function initializes the HIF Rx/Tx ring descriptors and
82 * initialize Rx queue with buffers.
/* NOTE(review): elided view — error returns, desc pointer advance and
 * several closing braces are not visible in this chunk.
 */
85 pfe_hif_init_buffers(struct pfe_hif *hif)
87 struct hif_desc *desc, *first_desc_p;
90 PMD_INIT_FUNC_TRACE();
92 /* Check enough Rx buffers available in the shared memory */
93 if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
96 hif->rx_base = hif->descr_baseaddr_v;
97 memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
99 /* Initialize Rx descriptors */
101 first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
103 for (i = 0; i < hif->rx_ring_size; i++) {
104 /* Initialize Rx buffers from the shared memory */
105 struct rte_mbuf *mbuf =
106 (struct rte_mbuf *)hif->shm->rx_buf_pool[i];
108 /* PFE mbuf structure is as follows:
109 * ----------------------------------------------------------+
110 * | mbuf | priv | headroom (annotation + PFE data) | data |
111 * ----------------------------------------------------------+
113 * As we are expecting additional information like parse
114 * results, eth id, queue id from PFE block along with data,
115 * we have to provide additional memory for each packet to
116 * HIF rx rings so that PFE block can write its headers.
117 * So we are giving the data pointer to HIF rings, whose
118 * calculation is as below:
119 * mbuf->data_pointer - Required_header_size
121 * We are utilizing the HEADROOM area to receive the PFE
122 * block headers. On packet reception, HIF driver will use
123 * PFE headers information based on which it will decide
124 * the clients and fill the parse results.
125 * After that the application can use/overwrite the HEADROOM area.
127 hif->rx_buf_vaddr[i] =
128 (void *)((size_t)mbuf->buf_addr + mbuf->data_off -
130 hif->rx_buf_addr[i] =
131 (void *)(size_t)(rte_pktmbuf_iova(mbuf) -
133 hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* Ownership of the mbuf has moved to the HIF ring; clear the pool slot */
135 hif->shm->rx_buf_pool[i] = NULL;
137 writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
139 writel(0, &desc->status);
142 * Ensure everything else is written to DDR before
/* Hand the descriptor to hardware: enable bit + buffer length last */
147 writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
148 | BD_CTRL_DIR | BD_CTRL_DESC_EN
149 | BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
151 /* Chain descriptors */
152 writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
156 /* Overwrite last descriptor to chain it to first one */
158 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
160 hif->rxtoclean_index = 0;
162 /* Initialize Rx buffer descriptor ring base address */
163 writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
/* Tx ring lives immediately after the Rx ring in the same allocation */
165 hif->tx_base = hif->rx_base + hif->rx_ring_size;
166 first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
168 memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
170 /* Initialize tx descriptors */
173 for (i = 0; i < hif->tx_ring_size; i++) {
174 /* Chain descriptors */
175 writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
176 writel(0, &desc->ctrl);
180 /* Overwrite last descriptor to chain it to first one */
182 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
183 hif->txavail = hif->tx_ring_size;
188 /* Initialize Tx buffer descriptor ring base address */
189 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
195 * pfe_hif_client_register
197 * This function used to register a client driver with the HIF driver.
200 * 0 - on Successful registration
/* NOTE(review): elided view — error-path unlock/return between the
 * "already registered" check and the queue setup is not visible here.
 */
203 pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
204 struct hif_client_shm *client_shm)
206 struct hif_client *client = &hif->client[client_id];
208 struct rx_queue_desc *rx_qbase;
209 struct tx_queue_desc *tx_qbase;
210 struct hif_rx_queue *rx_queue;
211 struct hif_tx_queue *tx_queue;
214 PMD_INIT_FUNC_TRACE();
/* tx_lock also guards the shared g_client_status bitmap */
216 rte_spinlock_lock(&hif->tx_lock);
218 if (test_bit(client_id, &hif->shm->g_client_status[0])) {
219 PFE_PMD_ERR("client %d already registered", client_id);
224 memset(client, 0, sizeof(struct hif_client));
226 /* Initialize client Rx queues baseaddr, size */
228 cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
229 /* Check if client is requesting for more queues than supported */
230 if (cnt > HIF_CLIENT_QUEUES_MAX)
231 cnt = HIF_CLIENT_QUEUES_MAX;
/* Queue descriptor arrays are laid out back-to-back in client shm */
234 rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
235 for (i = 0; i < cnt; i++) {
236 rx_queue = &client->rx_q[i];
237 rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
238 rx_queue->size = client_shm->rx_qsize;
239 rx_queue->write_idx = 0;
242 /* Initialize client Tx queues baseaddr, size */
243 cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
245 /* Check if client is requesting for more queues than supported */
246 if (cnt > HIF_CLIENT_QUEUES_MAX)
247 cnt = HIF_CLIENT_QUEUES_MAX;
250 tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
251 for (i = 0; i < cnt; i++) {
252 tx_queue = &client->tx_q[i];
253 tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
254 tx_queue->size = client_shm->tx_qsize;
255 tx_queue->ack_idx = 0;
/* Publish the client as registered only after its queues are set up */
258 set_bit(client_id, &hif->shm->g_client_status[0]);
261 rte_spinlock_unlock(&hif->tx_lock);
267 * pfe_hif_client_unregister
269 * This function used to unregister a client from the HIF driver.
273 pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
275 PMD_INIT_FUNC_TRACE();
278 * Mark client as no longer available (which prevents further packet
279 * receive for this client)
281 rte_spinlock_lock(&hif->tx_lock);
/* Unregistering a client that is not registered is an error, not a no-op */
283 if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
284 PFE_PMD_ERR("client %d not registered", client_id);
286 rte_spinlock_unlock(&hif->tx_lock);
290 clear_bit(client_id, &hif->shm->g_client_status[0]);
292 rte_spinlock_unlock(&hif->tx_lock);
296 * client_put_rxpacket-
 * Hand a received packet to a client Rx queue slot (if the slot is
 * hardware/driver-owned) and allocate a fresh mbuf from @pool as the
 * replacement ring buffer; the caller re-arms the HIF descriptor with it.
 * Returns the replacement mbuf (NULL on queue-full or allocation failure)
 * and the usable buffer length through @rem_len.
 * NOTE(review): elided view — the desc->data/len stores and the
 * non-owned-slot path are not visible here.
298 static struct rte_mbuf *
299 client_put_rxpacket(struct hif_rx_queue *queue,
301 u32 flags, u32 client_ctrl,
302 struct rte_mempool *pool,
305 struct rx_queue_desc *desc = queue->base + queue->write_idx;
306 struct rte_mbuf *mbuf = NULL;
/* CL_DESC_OWN set means this slot is available for the driver to fill */
309 if (readl(&desc->ctrl) & CL_DESC_OWN) {
310 mbuf = rte_cpu_to_le_64(rte_pktmbuf_alloc(pool));
311 if (unlikely(!mbuf)) {
312 PFE_PMD_WARN("Buffer allocation failure\n");
317 desc->client_ctrl = client_ctrl;
319 * Ensure everything else is written to DDR before
/* ctrl word written last: transfers slot ownership to the client */
323 writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
/* queue->size is assumed to be a power of two for this wrap — TODO confirm */
324 queue->write_idx = (queue->write_idx + 1)
327 *rem_len = mbuf->buf_len;
334 * pfe_hif_rx_process-
335 * This function does pfe hif rx queue processing.
336 * Dequeue packet from Rx queue and send it to corresponding client queue
/* Processes up to @budget packets from the HIF Rx ring under hif->lock:
 * parses the HIF header of each first fragment to pick client/queue,
 * forwards the buffer via client_put_rxpacket(), and re-arms the ring
 * descriptor with the replacement mbuf.
 * NOTE(review): elided view — break/continue paths, the retry logic and
 * the "invalid queue"/"invalid client" recovery code are only partially
 * visible; comments below are limited to what the visible lines show.
 */
339 pfe_hif_rx_process(struct pfe *pfe, int budget)
341 struct hif_desc *desc;
342 struct hif_hdr *pkt_hdr;
343 struct __hif_hdr hif_hdr;
345 int rtc, len, rx_processed = 0;
346 struct __hif_desc local_desc;
347 int flags = 0, wait_for_last = 0, retry = 0;
348 unsigned int buf_size = 0;
349 struct rte_mbuf *mbuf = NULL;
350 struct pfe_hif *hif = &pfe->hif;
352 rte_spinlock_lock(&hif->lock);
354 rtc = hif->rxtoclean_index;
356 while (rx_processed < budget) {
357 desc = hif->rx_base + rtc;
/* Snapshot the descriptor so hardware updates don't race our reads */
359 __memcpy12(&local_desc, desc);
361 /* ACK pending Rx interrupt */
/* DESC_EN still set => hardware has not completed this descriptor yet */
362 if (local_desc.ctrl & BD_CTRL_DESC_EN) {
363 if (unlikely(wait_for_last))
369 len = BD_BUF_LEN(local_desc.ctrl);
370 pkt_hdr = (struct hif_hdr *)hif->rx_buf_vaddr[rtc];
372 /* Track last HIF header received */
376 __memcpy8(&hif_hdr, pkt_hdr);
/* First fragment carries the HIF header: latch client/queue/ctrl state
 * so continuation fragments of the same packet reuse it.
 */
378 hif->qno = hif_hdr.hdr.q_num;
379 hif->client_id = hif_hdr.hdr.client_id;
380 hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
381 hif_hdr.hdr.client_ctrl;
382 flags = CL_DESC_FIRST;
388 if (local_desc.ctrl & BD_CTRL_LIFM) {
389 flags |= CL_DESC_LAST;
395 /* Check for valid client id and still registered */
396 if (hif->client_id >= HIF_CLIENTS_MAX ||
397 !(test_bit(hif->client_id,
398 &hif->shm->g_client_status[0]))) {
399 PFE_PMD_INFO("packet with invalid client id %d qnum %d",
400 hif->client_id, hif->qno);
/* Drop: recycle the current ring buffer instead of forwarding it */
402 free_buf = hif->rx_buf_addr[rtc];
407 /* Check to valid queue number */
408 if (hif->client[hif->client_id].rx_qn <= hif->qno) {
409 PFE_DP_LOG(DEBUG, "packet with invalid queue: %d",
416 client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
417 (void *)pkt_hdr, len, flags,
418 hif->client_ctrl, hif->shm->pool,
/* No replacement mbuf: try reclaiming Tx resources, then give up on
 * this budget round.
 */
421 if (unlikely(!mbuf)) {
423 pfe_tx_do_cleanup(pfe);
427 rx_processed = budget;
429 if (flags & CL_DESC_FIRST)
432 PFE_DP_LOG(DEBUG, "No buffers");
/* Re-arm the ring slot with the replacement mbuf's buffer, leaving
 * PFE_PKT_HEADER_SZ in front for the PFE-written header.
 */
438 free_buf = (void *)(size_t)rte_pktmbuf_iova(mbuf);
439 free_buf = free_buf - PFE_PKT_HEADER_SZ;
441 /* Fill free buffer in the descriptor */
442 hif->rx_buf_addr[rtc] = free_buf;
443 hif->rx_buf_vaddr[rtc] = (void *)((size_t)mbuf->buf_addr +
444 mbuf->data_off - PFE_PKT_HEADER_SZ);
445 hif->rx_buf_len[rtc] = buf_size - RTE_PKTMBUF_HEADROOM;
448 writel(DDR_PHYS_TO_PFE(free_buf), &desc->data);
450 * Ensure everything else is written to DDR before
/* Give the descriptor back to hardware (DESC_EN last) */
454 writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
455 BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
/* Ring size is a power of two; mask performs the wrap */
458 rtc = (rtc + 1) & (hif->rx_ring_size - 1);
/* A packet only counts once its last fragment arrives and it is not
 * flagged as continued.
 */
460 if (local_desc.ctrl & BD_CTRL_LIFM) {
461 if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED))
469 hif->rxtoclean_index = rtc;
470 rte_spinlock_unlock(&hif->lock);
472 /* we made some progress, re-start rx dma in case it stopped */
479 * client_ack_txpacket-
480 * This function ack the Tx packet in the give client Tx queue by resetting
481 * ownership bit in the descriptor.
/* Called from Tx-done processing once the HIF hardware has consumed a
 * buffer; clearing CL_DESC_OWN returns the slot to the client.
 */
484 client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
487 struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
488 struct tx_queue_desc *desc = queue->base + queue->ack_idx;
490 if (readl(&desc->ctrl) & CL_DESC_OWN) {
491 writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
/* queue->size is assumed to be a power of two for this wrap */
492 queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
497 /*This should not happen */
/* Ack requested for a descriptor the client still owns: dump state */
498 PFE_PMD_ERR("%d %d %d %d %d %p %d",
499 hif->txtosend, hif->txtoclean, hif->txavail,
500 client_id, q_no, queue, queue->ack_idx);
/*
 * __hif_tx_done_process - reclaim up to @count completed Tx descriptors.
 * Must be called with hif->tx_lock held (see hif_tx_done_process()).
 * Walks the Tx ring from txtoclean, acks each completed buffer back to
 * its client queue, and finally notifies clients of the per-client
 * completion counts.
 */
506 __hif_tx_done_process(struct pfe *pfe, int count)
508 struct hif_desc *desc;
509 struct hif_desc_sw *desc_sw;
510 unsigned int ttc, tx_avl;
511 int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
512 struct pfe_hif *pfe_hif = &pfe->hif;
514 ttc = hif->txtoclean;
515 tx_avl = hif->txavail;
517 while ((tx_avl < hif->tx_ring_size) && count--) {
518 desc = hif->tx_base + ttc;
/* DESC_EN still set => hardware has not finished this descriptor */
520 if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
523 desc_sw = &hif->tx_sw_queue[ttc];
/* NOTE(review): `>` lets client_id == HIF_CLIENTS_MAX through and the
 * pkts_done[] index below is unguarded — looks like it should be `>=`;
 * confirm against upstream before changing.
 */
525 if (desc_sw->client_id > HIF_CLIENTS_MAX)
526 PFE_PMD_ERR("Invalid cl id %d", desc_sw->client_id);
528 pkts_done[desc_sw->client_id]++;
530 client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
532 ttc = (ttc + 1) & (hif->tx_ring_size - 1);
/* Report completions to the two HIF clients */
537 hif_lib_indicate_client(pfe->hif_client[0], EVENT_TXDONE_IND,
540 hif_lib_indicate_client(pfe->hif_client[1], EVENT_TXDONE_IND,
542 hif->txtoclean = ttc;
543 hif->txavail = tx_avl;
/* hif_tx_done_process - locked wrapper around __hif_tx_done_process() */
547 hif_tx_done_process(struct pfe *pfe, int count)
549 struct pfe_hif *hif = &pfe->hif;
550 rte_spinlock_lock(&hif->tx_lock);
551 __hif_tx_done_process(pfe, count);
552 rte_spinlock_unlock(&hif->tx_lock);
/* pfe_tx_do_cleanup - reclaim the whole Tx ring (used e.g. when Rx runs
 * out of mbufs, to free buffers held by pending transmits).
 */
556 pfe_tx_do_cleanup(struct pfe *pfe)
558 hif_tx_done_process(pfe, HIF_TX_DESC_NT);
563 * This function puts one packet in the HIF Tx queue
/* Records the buffer in the ring + software shadow queue, then, once a
 * complete packet (HIF_DATA_VALID + last buffer) has been queued,
 * flushes all staged descriptors to hardware by writing their ctrl
 * words in order (DESC_EN last per descriptor).
 * Caller is expected to hold hif->tx_lock — TODO confirm (elided view).
 */
566 hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
567 q_no, void *data, u32 len, unsigned int flags)
569 struct hif_desc *desc;
570 struct hif_desc_sw *desc_sw;
572 desc = hif->tx_base + hif->txtosend;
573 desc_sw = &hif->tx_sw_queue[hif->txtosend];
/* Shadow entry remembers who owns this buffer for Tx-done acking */
576 desc_sw->client_id = client_id;
577 desc_sw->q_no = q_no;
578 desc_sw->flags = flags;
580 writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
582 hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
/* Only flush to hardware when the packet is complete */
585 if ((!((flags & HIF_DATA_VALID) && (flags &
590 * Ensure everything else is written to DDR before
596 desc_sw = &hif->tx_sw_queue[hif->txtoflush];
597 desc = hif->tx_base + hif->txtoflush;
/* Last buffer of a packet gets the full control word incl. LIFM */
599 if (desc_sw->flags & HIF_LAST_BUFFER) {
600 writel((BD_CTRL_LIFM |
601 BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
602 | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
603 BD_BUF_LEN(desc_sw->len)),
606 writel((BD_CTRL_DESC_EN |
607 BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
609 hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
611 while (hif->txtoflush != hif->txtosend)
/*
 * hif_process_client_req - dispatch a client control request.
 * @req:   REQUEST_CL_REGISTER or REQUEST_CL_UNREGISTER
 * @data1: client id (validated against HIF_CLIENTS_MAX before use)
 * @data2: unused
 */
619 hif_process_client_req(struct pfe_hif *hif, int req,
620 int data1, __rte_unused int data2)
622 unsigned int client_id = data1;
624 if (client_id >= HIF_CLIENTS_MAX) {
625 PFE_PMD_ERR("client id %d out of bounds", client_id);
630 case REQUEST_CL_REGISTER:
631 /* Request for register a client */
632 PFE_PMD_INFO("register client_id %d", client_id);
633 pfe_hif_client_register(hif, client_id, (struct
634 hif_client_shm *)&hif->shm->client[client_id]);
637 case REQUEST_CL_UNREGISTER:
638 PFE_PMD_INFO("unregister client_id %d", client_id);
640 /* Request for unregister a client */
641 pfe_hif_client_unregister(hif, client_id);
646 PFE_PMD_ERR("unsupported request %d", req);
651 * Process client Tx queues
652 * Currently we do not check for pending Tx completions here
#if defined(LS1012A_PFE_RESET_WA)
/* pfe_hif_disable_rx_desc - LS1012A reset workaround helper: mark every
 * Rx descriptor as the last BD so the HIF Rx BDP stops fetching.
 */
658 pfe_hif_disable_rx_desc(struct pfe_hif *hif)
661 struct hif_desc *desc = hif->rx_base;
663 /* Mark all descriptors as LAST_BD */
664 for (ii = 0; ii < hif->rx_ring_size; ii++) {
665 desc->ctrl |= BD_CTRL_LAST_BD;
/* Header prepended to the dummy packet injected into the classifier by
 * send_dummy_pkt_to_hif(); fields are stored big-endian (htons/htonl).
 */
670 struct class_rx_hdr_t {
671 u32 next_ptr; /* ptr to the start of the first DDR buffer */
672 u16 length; /* total packet length */
673 u16 phyno; /* input physical port number */
674 u32 status; /* gemac status bits */
675 u32 status2; /* reserved for software usage */
678 /* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
/* GEMAC status bits carried in class_rx_hdr_t.status (BIT() positions) */
681 #define STATUS_BAD_FRAME_ERR BIT(16)
682 #define STATUS_LENGTH_ERR BIT(17)
683 #define STATUS_CRC_ERR BIT(18)
684 #define STATUS_TOO_SHORT_ERR BIT(19)
685 #define STATUS_TOO_LONG_ERR BIT(20)
686 #define STATUS_CODE_ERR BIT(21)
687 #define STATUS_MC_HASH_MATCH BIT(22)
688 #define STATUS_CUMULATIVE_ARC_HIT BIT(23)
689 #define STATUS_UNICAST_HASH_MATCH BIT(24)
690 #define STATUS_IP_CHECKSUM_CORRECT BIT(25)
691 #define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
692 #define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
693 #define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
/* Dummy-packet parameters for the reset workaround */
694 #define MIN_PKT_SIZE 64
695 #define DUMMY_PKT_COUNT 128
/* copy_to_lmem - copy @len bytes, one u32 at a time, into PFE local
 * memory (LMEM). NOTE(review): loop body is elided in this view.
 */
698 copy_to_lmem(u32 *dst, u32 *src, int len)
702 for (i = 0; i < len; i += sizeof(u32)) {
#if defined(RTE_TOOLCHAIN_GCC)
/* Built at -O1: higher optimization levels reorder/elide the LMEM MMIO
 * copies this workaround depends on — presumably; confirm upstream.
 */
708 __attribute__ ((optimize(1)))
/* send_dummy_pkt_to_hif - LS1012A reset workaround: allocate a BMU2
 * (DDR) and a BMU1 (LMEM) buffer, build a minimal Ethernet/ARP-looking
 * frame with "all checksums correct" status, and inject it into the
 * classifier so the HIF Rx DMA drains and can go idle.
 * NOTE(review): elided view — NULL checks on the BMU allocations are
 * not visible here.
 */
711 send_dummy_pkt_to_hif(void)
713 void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
715 struct class_rx_hdr_t local_hdr;
716 static u32 dummy_pkt[] = {
717 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
718 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
719 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
720 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
/* Reading BMU_ALLOC_CTRL pops a buffer from the buffer-management unit */
722 ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
726 lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
730 PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
731 physaddr = DDR_VIRT_TO_PFE(ddr_ptr);
733 lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);
/* Header fields are big-endian as expected by the classifier */
735 local_hdr.phyno = htons(0); /* RX_PHY_0 */
736 local_hdr.length = htons(MIN_PKT_SIZE);
738 local_hdr.next_ptr = htonl((u32)physaddr);
739 /* Mark checksums as correct */
740 local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
741 STATUS_UDP_CHECKSUM_CORRECT |
742 STATUS_TCP_CHECKSUM_CORRECT |
743 STATUS_UNICAST_HASH_MATCH |
744 STATUS_CUMULATIVE_ARC_HIT));
745 copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
748 copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
/* Kick the classifier input queue with the LMEM packet pointer */
751 writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
/* pfe_hif_rx_idle - quiesce the HIF Rx path (reset workaround).
 * Disables Rx descriptors and interrupts, then pumps up to
 * DUMMY_PKT_COUNT dummy packets through the classifier until the Rx BDP
 * DMA reports inactive; logs an error if it never goes idle.
 */
755 pfe_hif_rx_idle(struct pfe_hif *hif)
757 int hif_stop_loop = DUMMY_PKT_COUNT;
760 pfe_hif_disable_rx_desc(hif);
761 PFE_PMD_INFO("Bringing hif to idle state...");
762 writel(0, HIF_INT_ENABLE);
763 /* If HIF Rx BDP is busy send a dummy packet */
765 rx_status = readl(HIF_RX_STATUS);
766 if (rx_status & BDP_CSR_RX_DMA_ACTV)
767 send_dummy_pkt_to_hif();
770 } while (--hif_stop_loop);
772 if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
773 PFE_PMD_ERR("Failed\n");
775 PFE_PMD_INFO("Done\n");
781 * This function initializes the baseaddresses and irq, etc.
/* Sets up the HIF: optional Rx-idle workaround, descriptor allocation,
 * locks, HGPI enable, and — only when PFE_INTR_SUPPORT is set in the
 * environment — eventfd/epoll based interrupt plumbing via the PFE
 * character device.
 * NOTE(review): elided view — several error returns and the ioctl/epoll
 * failure cleanup are not visible here.
 */
784 pfe_hif_init(struct pfe *pfe)
786 struct pfe_hif *hif = &pfe->hif;
789 PMD_INIT_FUNC_TRACE();
791 #if defined(LS1012A_PFE_RESET_WA)
792 pfe_hif_rx_idle(hif);
795 err = pfe_hif_alloc_descr(hif);
799 rte_spinlock_init(&hif->tx_lock);
800 rte_spinlock_init(&hif->lock);
802 gpi_enable(HGPI_BASE_ADDR);
/* Interrupt mode is opt-in via environment variable */
803 if (getenv("PFE_INTR_SUPPORT")) {
804 struct epoll_event epoll_ev;
805 int event_fd = -1, epoll_fd, pfe_cdev_fd;
807 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
808 if (pfe_cdev_fd < 0) {
809 PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
/* Fall back to polling mode when the cdev is unavailable */
811 pfe->cdev_fd = PFE_CDEV_INVALID_FD;
814 pfe->cdev_fd = pfe_cdev_fd;
/* eventfd is signalled by the kernel driver on HIF interrupts */
816 event_fd = eventfd(0, EFD_NONBLOCK);
817 /* hif interrupt enable */
818 err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
820 PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n",
824 epoll_fd = epoll_create(1);
825 epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
826 epoll_ev.data.fd = event_fd;
827 err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
829 PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno);
832 pfe->hif.epoll_fd = epoll_fd;
/* pfe_hif_exit - tear down the HIF: disable all clients, idle the Rx
 * path (reset workaround builds), release ring buffers, shared memory
 * and descriptor memory, and disable the HGPI block.
 * NOTE(review): the function continues past this elided view.
 */
841 pfe_hif_exit(struct pfe *pfe)
843 struct pfe_hif *hif = &pfe->hif;
845 PMD_INIT_FUNC_TRACE();
847 rte_spinlock_lock(&hif->lock);
848 hif->shm->g_client_status[0] = 0;
849 /* Make sure all clients are disabled */
850 hif->shm->g_client_status[1] = 0;
852 rte_spinlock_unlock(&hif->lock);
855 #if defined(LS1012A_PFE_RESET_WA)
856 pfe_hif_rx_idle(hif);
862 pfe_hif_release_buffers(hif);
863 pfe_hif_shm_clean(hif->shm);
865 pfe_hif_free_descr(hif);
866 pfe->hif.setuped = 0;
868 gpi_disable(HGPI_BASE_ADDR);