1 /* SPDX-License-Identifier: BSD-3-Clause
9 #include <sys/eventfd.h>
/*
 * pfe_hif_alloc_descr
 * Allocate one zeroed, cache-line-aligned region large enough to hold both
 * the Rx (HIF_RX_DESC_NT) and Tx (HIF_TX_DESC_NT) HIF descriptor rings,
 * then record its CPU-virtual and PFE-physical base addresses plus the two
 * ring sizes in the hif context.
 * NOTE(review): this chunk is missing lines (braces, local declarations,
 * the allocation-failure return path); comments cover only what is visible.
 */
12 pfe_hif_alloc_descr(struct pfe_hif *hif)
17 PMD_INIT_FUNC_TRACE();
/* Single contiguous allocation: Rx ring first, Tx ring immediately after. */
19 addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
20 HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
/* Error message emitted on allocation failure (failure branch elided here). */
22 PFE_PMD_ERR("Could not allocate buffer descriptors!");
/* Cache both views of the ring memory: PFE-visible physical and CPU virtual. */
27 hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
28 hif->descr_baseaddr_v = addr;
29 hif->rx_ring_size = HIF_RX_DESC_NT;
30 hif->tx_ring_size = HIF_TX_DESC_NT;
/*
 * pfe_hif_free_descr
 * Release the combined Rx/Tx descriptor-ring memory that was allocated by
 * pfe_hif_alloc_descr(). rte_free(NULL) is a safe no-op, so no guard is
 * needed.
 */
39 pfe_hif_free_descr(struct pfe_hif *hif)
41 PMD_INIT_FUNC_TRACE();
43 rte_free(hif->descr_baseaddr_v);
47 * pfe_hif_init_buffers
48 * This function initializes the HIF Rx/Tx ring descriptors and
49 * initialize Rx queue with buffers.
/*
 * NOTE(review): this chunk is a sparse extraction — braces, local
 * declarations (i, the descriptor cursor advance, etc.) and several
 * statements are elided. Comments below annotate only the visible lines.
 */
52 pfe_hif_init_buffers(struct pfe_hif *hif)
54 struct hif_desc *desc, *first_desc_p;
57 PMD_INIT_FUNC_TRACE();
59 /* Check enough Rx buffers available in the shared memory */
60 if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
/* Rx ring occupies the start of the combined descriptor allocation. */
63 hif->rx_base = hif->descr_baseaddr_v;
64 memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
66 /*Initialize Rx descriptors */
/* first_desc_p is the PFE-physical base used for descriptor chaining. */
68 first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
70 for (i = 0; i < hif->rx_ring_size; i++) {
71 /* Initialize Rx buffers from the shared memory */
72 struct rte_mbuf *mbuf =
73 (struct rte_mbuf *)hif->shm->rx_buf_pool[i];
75 /* PFE mbuf structure is as follow:
76 * ----------------------------------------------------------+
77 * | mbuf | priv | headroom (annotation + PFE data) | data |
78 * ----------------------------------------------------------+
80 * As we are expecting additional information like parse
81 * results, eth id, queue id from PFE block along with data.
82 * so we have to provide additional memory for each packet to
83 * HIF rx rings so that PFE block can write its headers.
84 * so, we are giving the data pointor to HIF rings whose
85 * calculation is as below:
86 * mbuf->data_pointor - Required_header_size
88 * We are utilizing the HEADROOM area to receive the PFE
89 * block headers. On packet reception, HIF driver will use
90 * PFE headers information based on which it will decide
91 * the clients and fill the parse results.
92 * after that application can use/overwrite the HEADROOM area.
/* Virtual buffer address handed to the ring (headroom offset elided here). */
94 hif->rx_buf_vaddr[i] =
95 (void *)((size_t)mbuf->buf_addr + mbuf->data_off -
/* NOTE(review): the matching physical-address assignment
 * (presumably hif->rx_buf_addr[i]) is partially elided in this chunk. */
98 (void *)(size_t)(rte_pktmbuf_iova(mbuf) -
/* Usable length excludes the headroom reserved for PFE headers. */
100 hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
/* Ownership of the mbuf transfers to the HIF Rx ring; drop pool reference. */
102 hif->shm->rx_buf_pool[i] = NULL;
/* Program buffer address and clear status before enabling the descriptor. */
104 writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
106 writel(0, &desc->status);
109 * Ensure everything else is written to DDR before
/* Control word: interrupt per packet, last-in-frame, Rx direction,
 * descriptor enabled, and the usable buffer length. */
114 writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
115 | BD_CTRL_DIR | BD_CTRL_DESC_EN
116 | BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);
118 /* Chain descriptors */
119 writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
123 /* Overwrite last descriptor to chain it to first one*/
125 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
127 hif->rxtoclean_index = 0;
129 /*Initialize Rx buffer descriptor ring base address */
130 writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
/* Tx ring immediately follows the Rx ring in the same allocation. */
132 hif->tx_base = hif->rx_base + hif->rx_ring_size;
133 first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
135 memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
137 /*Initialize tx descriptors */
140 for (i = 0; i < hif->tx_ring_size; i++) {
141 /* Chain descriptors */
142 writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
/* Tx descriptors start disabled (ctrl = 0); they are armed at transmit. */
143 writel(0, &desc->ctrl);
147 /* Overwrite last descriptor to chain it to first one */
149 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
/* Entire Tx ring is initially available to software. */
150 hif->txavail = hif->tx_ring_size;
155 /*Initialize Tx buffer descriptor ring base address */
156 writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
162 * pfe_hif_client_register
164 * This function used to register a client driver with the HIF driver.
167 * 0 - on Successful registration
/*
 * NOTE(review): chunk is missing lines (braces, the duplicate-registration
 * error return, local declarations of i/cnt, the final return value).
 * Registration runs entirely under hif->tx_lock.
 */
170 pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
171 struct hif_client_shm *client_shm)
173 struct hif_client *client = &hif->client[client_id];
175 struct rx_queue_desc *rx_qbase;
176 struct tx_queue_desc *tx_qbase;
177 struct hif_rx_queue *rx_queue;
178 struct hif_tx_queue *tx_queue;
181 PMD_INIT_FUNC_TRACE();
183 rte_spinlock_lock(&hif->tx_lock);
/* Reject a second registration for the same client id (bitmap check). */
185 if (test_bit(client_id, &hif->shm->g_client_status[0])) {
186 PFE_PMD_ERR("client %d already registered", client_id);
/* Start from a clean per-client state. */
191 memset(client, 0, sizeof(struct hif_client));
193 /* Initialize client Rx queues baseaddr, size */
195 cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
196 /* Check if client is requesting for more queues than supported */
197 if (cnt > HIF_CLIENT_QUEUES_MAX)
198 cnt = HIF_CLIENT_QUEUES_MAX;
/* Rx queues are laid out back-to-back in the client's shared memory,
 * each rx_qsize descriptors long. */
201 rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
202 for (i = 0; i < cnt; i++) {
203 rx_queue = &client->rx_q[i];
204 rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
205 rx_queue->size = client_shm->rx_qsize;
206 rx_queue->write_idx = 0;
209 /* Initialize client Tx queues baseaddr, size */
210 cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
212 /* Check if client is requesting for more queues than supported */
213 if (cnt > HIF_CLIENT_QUEUES_MAX)
214 cnt = HIF_CLIENT_QUEUES_MAX;
/* Same back-to-back layout for the Tx queues. */
217 tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
218 for (i = 0; i < cnt; i++) {
219 tx_queue = &client->tx_q[i];
220 tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
221 tx_queue->size = client_shm->tx_qsize;
222 tx_queue->ack_idx = 0;
/* Publish the client as registered only after its queues are set up. */
225 set_bit(client_id, &hif->shm->g_client_status[0]);
228 rte_spinlock_unlock(&hif->tx_lock);
234 * pfe_hif_client_unregister
236 * This function used to unregister a client from the HIF driver.
240 pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
242 PMD_INIT_FUNC_TRACE();
245 * Mark client as no longer available (which prevents further packet
246 * receive for this client)
248 rte_spinlock_lock(&hif->tx_lock);
/* Unregistering a client that was never registered is reported and
 * refused (return path after the unlock is elided in this chunk). */
250 if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
251 PFE_PMD_ERR("client %d not registered", client_id);
253 rte_spinlock_unlock(&hif->tx_lock);
/* Clear the registration bit under the same tx_lock that guards it. */
257 clear_bit(client_id, &hif->shm->g_client_status[0]);
259 rte_spinlock_unlock(&hif->tx_lock);
/*
 * hif_process_client_req
 * Dispatch a client control request: data1 carries the client id, data2 is
 * unused. Bounds-checks the id, then routes REQUEST_CL_REGISTER /
 * REQUEST_CL_UNREGISTER to the matching handler; unknown requests are
 * logged as errors.
 * NOTE(review): switch header, break statements and braces are elided in
 * this chunk.
 */
263 hif_process_client_req(struct pfe_hif *hif, int req,
264 int data1, __rte_unused int data2)
266 unsigned int client_id = data1;
/* Guard against an out-of-range client id before indexing any tables. */
268 if (client_id >= HIF_CLIENTS_MAX) {
269 PFE_PMD_ERR("client id %d out of bounds", client_id);
274 case REQUEST_CL_REGISTER:
275 /* Request for register a client */
276 PFE_PMD_INFO("register client_id %d", client_id);
/* Register using the client's slot in the HIF shared-memory region. */
277 pfe_hif_client_register(hif, client_id, (struct
278 hif_client_shm *)&hif->shm->client[client_id]);
281 case REQUEST_CL_UNREGISTER:
282 PFE_PMD_INFO("unregister client_id %d", client_id);
284 /* Request for unregister a client */
285 pfe_hif_client_unregister(hif, client_id);
/* Default case: unrecognized request code. */
290 PFE_PMD_ERR("unsupported request %d", req);
295 * Process client Tx queues
296 * Currently we don't have checking for tx pending
300 #if defined(LS1012A_PFE_RESET_WA)
/*
 * pfe_hif_disable_rx_desc
 * LS1012A reset-workaround helper: walk the Rx descriptor ring and set
 * BD_CTRL_LAST_BD on every descriptor so the BDP stops traversing the ring.
 * NOTE(review): the loop's descriptor-advance statement is elided in this
 * chunk.
 */
302 pfe_hif_disable_rx_desc(struct pfe_hif *hif)
305 struct hif_desc *desc = hif->rx_base;
307 /*Mark all descriptors as LAST_BD */
308 for (ii = 0; ii < hif->rx_ring_size; ii++) {
309 desc->ctrl |= BD_CTRL_LAST_BD;
/*
 * Header prepended to a packet handed to the classifier (CLASS) block;
 * built in send_dummy_pkt_to_hif() with all multi-byte fields stored
 * big-endian (htons/htonl).
 */
314 struct class_rx_hdr_t {
315 u32 next_ptr; /* ptr to the start of the first DDR buffer */
316 u16 length; /* total packet length */
317 u16 phyno; /* input physical port number */
318 u32 status; /* gemac status bits */
319 u32 status2; /* reserved for software usage */
322 /* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
/* Bit definitions for class_rx_hdr_t.status (GEMAC receive status word). */
325 #define STATUS_BAD_FRAME_ERR BIT(16)
326 #define STATUS_LENGTH_ERR BIT(17)
327 #define STATUS_CRC_ERR BIT(18)
328 #define STATUS_TOO_SHORT_ERR BIT(19)
329 #define STATUS_TOO_LONG_ERR BIT(20)
330 #define STATUS_CODE_ERR BIT(21)
331 #define STATUS_MC_HASH_MATCH BIT(22)
332 #define STATUS_CUMULATIVE_ARC_HIT BIT(23)
333 #define STATUS_UNICAST_HASH_MATCH BIT(24)
334 #define STATUS_IP_CHECKSUM_CORRECT BIT(25)
335 #define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
336 #define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
337 #define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
/* Size/count parameters for the dummy packets used to idle the HIF Rx BDP. */
338 #define MIN_PKT_SIZE 64
339 #define DUMMY_PKT_COUNT 128
/*
 * copy_to_lmem
 * Copy 'len' bytes from src to dst one 32-bit word at a time (len is
 * stepped in sizeof(u32) increments).
 * NOTE(review): the loop body is elided in this chunk — presumably a
 * word-wise store into PFE local memory, which may require 32-bit
 * accesses; confirm against the full source.
 */
342 copy_to_lmem(u32 *dst, u32 *src, int len)
346 for (i = 0; i < len; i += sizeof(u32)) {
/*
 * Built at -O1 under GCC — NOTE(review): presumably to keep the word-wise
 * LMEM copies from being transformed by higher optimization levels;
 * confirm against the workaround's commit message.
 */
351 #if defined(RTE_TOOLCHAIN_GCC)
352 __attribute__ ((optimize(1)))
/*
 * send_dummy_pkt_to_hif
 * LS1012A reset-workaround: inject one synthetic packet into the
 * classifier so the HIF Rx BDP makes forward progress and can drain to
 * idle. Allocates a DDR buffer (BMU2) and an LMEM buffer (BMU1), builds a
 * big-endian class_rx_hdr_t, copies header + canned frame into LMEM, then
 * kicks CLASS_INQ_PKTPTR.
 * NOTE(review): allocation-failure checks and some locals are elided in
 * this chunk.
 */
355 send_dummy_pkt_to_hif(void)
357 void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
359 struct class_rx_hdr_t local_hdr;
/* Canned 64-byte frame (pre-byte-swapped u32 words). */
360 static u32 dummy_pkt[] = {
361 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
362 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
363 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
364 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
/* Reading BMU_ALLOC_CTRL pops a buffer from the corresponding BMU pool. */
366 ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
370 lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
374 PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
375 physaddr = DDR_VIRT_TO_PFE(ddr_ptr);
377 lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);
/* All header fields are stored big-endian for the PFE blocks. */
379 local_hdr.phyno = htons(0); /* RX_PHY_0 */
380 local_hdr.length = htons(MIN_PKT_SIZE);
382 local_hdr.next_ptr = htonl((u32)physaddr);
383 /*Mark checksum is correct */
384 local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
385 STATUS_UDP_CHECKSUM_CORRECT |
386 STATUS_TCP_CHECKSUM_CORRECT |
387 STATUS_UNICAST_HASH_MATCH |
388 STATUS_CUMULATIVE_ARC_HIT));
/* Header first, then the frame right after it in LMEM. */
389 copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
392 copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
/* Hand the LMEM buffer to the classifier input queue. */
395 writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
/*
 * pfe_hif_rx_idle
 * Drive the HIF Rx block-descriptor processor to an idle state: mark all
 * Rx descriptors LAST_BD, mask HIF interrupts, then repeatedly feed dummy
 * packets (up to DUMMY_PKT_COUNT iterations) while the Rx DMA is still
 * active. Logs success or failure based on the final RX_DMA_ACTV state.
 * NOTE(review): braces and any per-iteration delay are elided in this
 * chunk.
 */
399 pfe_hif_rx_idle(struct pfe_hif *hif)
401 int hif_stop_loop = DUMMY_PKT_COUNT;
404 pfe_hif_disable_rx_desc(hif);
405 PFE_PMD_INFO("Bringing hif to idle state...");
/* Mask all HIF interrupts while idling the BDP. */
406 writel(0, HIF_INT_ENABLE);
407 /*If HIF Rx BDP is busy send a dummy packet */
409 rx_status = readl(HIF_RX_STATUS);
410 if (rx_status & BDP_CSR_RX_DMA_ACTV)
411 send_dummy_pkt_to_hif();
/* Bounded retry: give up after DUMMY_PKT_COUNT attempts. */
414 } while (--hif_stop_loop);
416 if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
417 PFE_PMD_ERR("Failed\n");
419 PFE_PMD_INFO("Done\n");
425 * This function initializes the baseaddresses and irq, etc.
/*
 * NOTE(review): braces, error-path returns/cleanup and the tail of the
 * function are elided in this chunk; comments cover only visible lines.
 */
428 pfe_hif_init(struct pfe *pfe)
430 struct pfe_hif *hif = &pfe->hif;
433 PMD_INIT_FUNC_TRACE();
/* LS1012A workaround: quiesce the Rx BDP before (re)initializing. */
435 #if defined(LS1012A_PFE_RESET_WA)
436 pfe_hif_rx_idle(hif);
439 err = pfe_hif_alloc_descr(hif);
443 rte_spinlock_init(&hif->tx_lock);
444 rte_spinlock_init(&hif->lock);
446 gpi_enable(HGPI_BASE_ADDR);
/* Interrupt mode is opt-in via the PFE_INTR_SUPPORT environment variable;
 * otherwise the driver stays in pure polling mode. */
447 if (getenv("PFE_INTR_SUPPORT")) {
448 struct epoll_event epoll_ev;
449 int event_fd = -1, epoll_fd, pfe_cdev_fd;
/* Kernel helper device that delivers HIF interrupts to user space. */
451 pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
452 if (pfe_cdev_fd < 0) {
453 PFE_PMD_WARN("Unable to open PFE device file (%s).\n",
/* Fall back: mark the cdev invalid (interrupts unavailable). */
455 pfe->cdev_fd = PFE_CDEV_INVALID_FD;
458 pfe->cdev_fd = pfe_cdev_fd;
/* eventfd is handed to the kernel driver, which signals it on HIF IRQ. */
460 event_fd = eventfd(0, EFD_NONBLOCK);
461 /* hif interrupt enable */
462 err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
464 PFE_PMD_ERR("\nioctl failed for intr enable err: %d\n",
/* Edge-triggered epoll on the eventfd for Rx wakeups. */
468 epoll_fd = epoll_create(1);
469 epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
470 epoll_ev.data.fd = event_fd;
471 err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
473 PFE_PMD_ERR("epoll_ctl failed with err = %d\n", errno);
476 pfe->hif.epoll_fd = epoll_fd;
/*
 * pfe_hif_exit
 * Tear down the HIF: clear the shared client-status words so all clients
 * are seen as disabled, idle the Rx BDP (LS1012A workaround), free the
 * descriptor rings, and disable the host GPI block.
 * NOTE(review): braces and conditional guards (e.g. around the free path)
 * are elided in this chunk.
 */
485 pfe_hif_exit(struct pfe *pfe)
487 struct pfe_hif *hif = &pfe->hif;
489 PMD_INIT_FUNC_TRACE();
491 rte_spinlock_lock(&hif->lock);
492 hif->shm->g_client_status[0] = 0;
493 /* Make sure all clients are disabled*/
494 hif->shm->g_client_status[1] = 0;
496 rte_spinlock_unlock(&hif->lock);
499 #if defined(LS1012A_PFE_RESET_WA)
500 pfe_hif_rx_idle(hif);
506 pfe_hif_free_descr(hif);
/* Mark the HIF as torn down so a later init runs the full setup again. */
507 pfe->hif.setuped = 0;
509 gpi_disable(HGPI_BASE_ADDR);