net/pfe: add queue setup and release
[dpdk.git] / drivers / net / pfe / pfe_hif.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2019 NXP
 */

#include "pfe_logs.h"
#include "pfe_mod.h"
#include <sys/ioctl.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>

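/*
 * pfe_hif_alloc_descr
 *
 * Allocate a single cache-line-aligned region that holds both the HIF
 * Rx and Tx descriptor rings; the Tx ring is laid out right after the
 * Rx ring (see pfe_hif_init_buffers()).
 */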
static int
pfe_hif_alloc_descr(struct pfe_hif *hif)
{
	void *addr;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	addr = rte_zmalloc(NULL, HIF_RX_DESC_NT * sizeof(struct hif_desc) +
		HIF_TX_DESC_NT * sizeof(struct hif_desc), RTE_CACHE_LINE_SIZE);
	if (!addr) {
		PFE_PMD_ERR("Could not allocate buffer descriptors!");
		err = -ENOMEM;
		goto err0;
	}

	hif->descr_baseaddr_p = pfe_mem_vtop((uintptr_t)addr);
	hif->descr_baseaddr_v = addr;
	hif->rx_ring_size = HIF_RX_DESC_NT;
	hif->tx_ring_size = HIF_TX_DESC_NT;

	return 0;

err0:
	return err;
}

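/* Release the descriptor region allocated by pfe_hif_alloc_descr() */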
static void
pfe_hif_free_descr(struct pfe_hif *hif)
{
	PMD_INIT_FUNC_TRACE();

	rte_free(hif->descr_baseaddr_v);
}

/*
 * pfe_hif_init_buffers
 * This function initializes the HIF Rx/Tx ring descriptors and
 * fills the Rx ring with buffers.
 */
int
pfe_hif_init_buffers(struct pfe_hif *hif)
{
	struct hif_desc *desc, *first_desc_p;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* Check that enough Rx buffers are available in the shared memory */
	if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
		return -ENOMEM;

	hif->rx_base = hif->descr_baseaddr_v;
	memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));

	/* Initialize Rx descriptors */
	desc = hif->rx_base;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;

	for (i = 0; i < hif->rx_ring_size; i++) {
		/* Initialize Rx buffers from the shared memory */
		struct rte_mbuf *mbuf =
			(struct rte_mbuf *)hif->shm->rx_buf_pool[i];

		/* The PFE mbuf layout is as follows:
		 * ----------------------------------------------------------+
		 * | mbuf  | priv | headroom (annotation + PFE data) | data  |
		 * ----------------------------------------------------------+
		 *
		 * We expect additional information such as parse results,
		 * eth id and queue id from the PFE block along with the
		 * data, so we have to provide extra memory for each packet
		 * to the HIF Rx rings, where the PFE block can write its
		 * headers. Hence the data pointer handed to the HIF rings
		 * is computed as:
		 * mbuf data pointer - Required_header_size
		 *
		 * We utilize the HEADROOM area to receive the PFE block
		 * headers. On packet reception, the HIF driver uses the PFE
		 * header information to decide the client and to fill in
		 * the parse results. After that, the application can
		 * use/overwrite the HEADROOM area.
		 */
		hif->rx_buf_vaddr[i] =
			(void *)((size_t)mbuf->buf_addr + mbuf->data_off -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_addr[i] =
			(void *)(size_t)(rte_pktmbuf_iova(mbuf) -
					PFE_PKT_HEADER_SZ);
		hif->rx_buf_len[i] = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;

		hif->shm->rx_buf_pool[i] = NULL;

		writel(DDR_PHYS_TO_PFE(hif->rx_buf_addr[i]),
					&desc->data);
		writel(0, &desc->status);

		/*
		 * Ensure everything else is written to DDR before
		 * writing bd->ctrl
		 */
		rte_wmb();

		writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
			| BD_CTRL_DIR | BD_CTRL_DESC_EN
			| BD_BUF_LEN(hif->rx_buf_len[i])), &desc->ctrl);

		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		desc++;
	}

	/* Overwrite the last descriptor to chain it back to the first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);

	hif->rxtoclean_index = 0;

	/* Initialize the Rx buffer descriptor ring base address */
	writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);

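	/* The Tx ring is laid out immediately after the Rx ring within the
	 * descriptor region allocated by pfe_hif_alloc_descr()
	 */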
	hif->tx_base = hif->rx_base + hif->rx_ring_size;
	first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
				hif->rx_ring_size;
	memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));

	/* Initialize Tx descriptors */
	desc = hif->tx_base;

	for (i = 0; i < hif->tx_ring_size; i++) {
		/* Chain descriptors */
		writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
		writel(0, &desc->ctrl);
		desc++;
	}

	/* Overwrite the last descriptor to chain it back to the first one */
	desc--;
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
	hif->txavail = hif->tx_ring_size;
	hif->txtosend = 0;
	hif->txtoclean = 0;
	hif->txtoflush = 0;

	/* Initialize the Tx buffer descriptor ring base address */
	writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);

	return 0;
}

/*
 * pfe_hif_client_register
 *
 * This function is used to register a client driver with the HIF driver.
 *
 * Return value:
 * 0 - on successful registration
 * negative value - on failure
 */
static int
pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
			struct hif_client_shm *client_shm)
{
	struct hif_client *client = &hif->client[client_id];
	u32 i, cnt;
	struct rx_queue_desc *rx_qbase;
	struct tx_queue_desc *tx_qbase;
	struct hif_rx_queue *rx_queue;
	struct hif_tx_queue *tx_queue;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->tx_lock);

	if (test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d already registered", client_id);
		err = -1;
		goto unlock;
	}

	memset(client, 0, sizeof(struct hif_client));

	/* Initialize client Rx queue base addresses and sizes */
	cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->rx_qn = cnt;
	rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
	for (i = 0; i < cnt; i++) {
		rx_queue = &client->rx_q[i];
		rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
		rx_queue->size = client_shm->rx_qsize;
		rx_queue->write_idx = 0;
	}

	/* Initialize client Tx queue base addresses and sizes */
	cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);

	/* Check if the client is requesting more queues than supported */
	if (cnt > HIF_CLIENT_QUEUES_MAX)
		cnt = HIF_CLIENT_QUEUES_MAX;

	client->tx_qn = cnt;
	tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
	for (i = 0; i < cnt; i++) {
		tx_queue = &client->tx_q[i];
		tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
		tx_queue->size = client_shm->tx_qsize;
		tx_queue->ack_idx = 0;
	}

	set_bit(client_id, &hif->shm->g_client_status[0]);

unlock:
	rte_spinlock_unlock(&hif->tx_lock);

	return err;
}

/*
 * pfe_hif_client_unregister
 *
 * This function is used to unregister a client from the HIF driver.
 */
static void
pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
{
	PMD_INIT_FUNC_TRACE();

	/*
	 * Mark the client as no longer available (which prevents further
	 * packet receive for this client)
	 */
	rte_spinlock_lock(&hif->tx_lock);

	if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
		PFE_PMD_ERR("client %d not registered", client_id);

		rte_spinlock_unlock(&hif->tx_lock);
		return;
	}

	clear_bit(client_id, &hif->shm->g_client_status[0]);

	rte_spinlock_unlock(&hif->tx_lock);
}

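/*
 * hif_process_client_req
 *
 * This function dispatches a client register/unregister request to the
 * HIF driver; data1 carries the client id.
 */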
void
hif_process_client_req(struct pfe_hif *hif, int req,
		       int data1, __rte_unused int data2)
{
	unsigned int client_id = data1;

	if (client_id >= HIF_CLIENTS_MAX) {
		PFE_PMD_ERR("client id %d out of bounds", client_id);
		return;
	}

	switch (req) {
	case REQUEST_CL_REGISTER:
		/* Request to register a client */
		PFE_PMD_INFO("register client_id %d", client_id);
		pfe_hif_client_register(hif, client_id, (struct
			hif_client_shm *)&hif->shm->client[client_id]);
		break;

	case REQUEST_CL_UNREGISTER:
		PFE_PMD_INFO("unregister client_id %d", client_id);

		/* Request to unregister a client */
		pfe_hif_client_unregister(hif, client_id);

		break;

	default:
		PFE_PMD_ERR("unsupported request %d", req);
		break;
	}

	/*
	 * Process client Tx queues
	 * Currently we don't check for pending Tx
	 */
}

#if defined(LS1012A_PFE_RESET_WA)
static void
pfe_hif_disable_rx_desc(struct pfe_hif *hif)
{
	u32 ii;
	struct hif_desc *desc = hif->rx_base;

	/* Mark all descriptors as LAST_BD */
	for (ii = 0; ii < hif->rx_ring_size; ii++) {
		desc->ctrl |= BD_CTRL_LAST_BD;
		desc++;
	}
}

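/* Rx header expected by the class block at the start of an LMEM buffer;
 * send_dummy_pkt_to_hif() below writes its fields in big-endian order.
 */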
struct class_rx_hdr_t {
	u32	next_ptr;	/* ptr to the start of the first DDR buffer */
	u16	length;		/* total packet length */
	u16	phyno;		/* input physical port number */
	u32	status;		/* gemac status bits */
	u32	status2;	/* reserved for software usage */
};

/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
 * except overflow
 */
#define STATUS_BAD_FRAME_ERR		BIT(16)
#define STATUS_LENGTH_ERR		BIT(17)
#define STATUS_CRC_ERR			BIT(18)
#define STATUS_TOO_SHORT_ERR		BIT(19)
#define STATUS_TOO_LONG_ERR		BIT(20)
#define STATUS_CODE_ERR			BIT(21)
#define STATUS_MC_HASH_MATCH		BIT(22)
#define STATUS_CUMULATIVE_ARC_HIT	BIT(23)
#define STATUS_UNICAST_HASH_MATCH	BIT(24)
#define STATUS_IP_CHECKSUM_CORRECT	BIT(25)
#define STATUS_TCP_CHECKSUM_CORRECT	BIT(26)
#define STATUS_UDP_CHECKSUM_CORRECT	BIT(27)
#define STATUS_OVERFLOW_ERR		BIT(28) /* GPI error */
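/* Length of the dummy packet and the number of idle-wait iterations
 * used by pfe_hif_rx_idle() below
 */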
#define MIN_PKT_SIZE			64
#define DUMMY_PKT_COUNT			128

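/* Copy 'len' bytes as 32-bit words into LMEM, converting each word to
 * big-endian as expected by the PFE block
 */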
static inline void
copy_to_lmem(u32 *dst, u32 *src, int len)
{
	int i;

	for (i = 0; i < len; i += sizeof(u32)) {
		*dst = htonl(*src);
		dst++; src++;
	}
}
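
/*
 * send_dummy_pkt_to_hif
 *
 * Allocate one DDR and one LMEM buffer from the BMUs, build a
 * class_rx_hdr_t plus a canned payload in LMEM and inject the packet
 * into the class block via CLASS_INQ_PKTPTR, giving the HIF Rx BDP
 * traffic to drain. Built at -O1 on GCC; higher optimization levels
 * are assumed to break the LMEM access pattern here.
 */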
#if defined(RTE_TOOLCHAIN_GCC)
__attribute__ ((optimize(1)))
#endif
static void
send_dummy_pkt_to_hif(void)
{
	void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
	u64 physaddr;
	struct class_rx_hdr_t local_hdr;
	static u32 dummy_pkt[] =  {
		0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
		0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
		0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
		0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };

	ddr_ptr = (void *)(size_t)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!ddr_ptr)
		return;

	lmem_ptr = (void *)(size_t)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL);
	if (!lmem_ptr)
		return;

	PFE_PMD_INFO("Sending a dummy pkt to HIF %p %p", ddr_ptr, lmem_ptr);
	physaddr = DDR_VIRT_TO_PFE(ddr_ptr);

	lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long)lmem_ptr);

	local_hdr.phyno = htons(0); /* RX_PHY_0 */
	local_hdr.length = htons(MIN_PKT_SIZE);

	local_hdr.next_ptr = htonl((u32)physaddr);
	/* Mark the checksums as correct */
	local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
				STATUS_UDP_CHECKSUM_CORRECT |
				STATUS_TCP_CHECKSUM_CORRECT |
				STATUS_UNICAST_HASH_MATCH |
				STATUS_CUMULATIVE_ARC_HIT));
	copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
		     sizeof(local_hdr));

	copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
		     0x40);

	writel((unsigned long)lmem_ptr, CLASS_INQ_PKTPTR);
}

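/*
 * pfe_hif_rx_idle
 *
 * Part of the LS1012A reset workaround: mark all Rx descriptors as
 * LAST_BD, mask HIF interrupts and feed dummy packets until the Rx
 * BDP DMA goes idle.
 */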
void
pfe_hif_rx_idle(struct pfe_hif *hif)
{
	int hif_stop_loop = DUMMY_PKT_COUNT;
	u32 rx_status;

	pfe_hif_disable_rx_desc(hif);
	PFE_PMD_INFO("Bringing hif to idle state...");
	writel(0, HIF_INT_ENABLE);
	/* If the HIF Rx BDP is busy, send a dummy packet */
	do {
		rx_status = readl(HIF_RX_STATUS);
		if (rx_status & BDP_CSR_RX_DMA_ACTV)
			send_dummy_pkt_to_hif();

		sleep(1);
	} while (--hif_stop_loop);

	if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
		PFE_PMD_ERR("Failed");
	else
		PFE_PMD_INFO("Done");
}
#endif

/*
 * pfe_hif_init
 * This function initializes the base addresses, locks and, optionally,
 * the HIF interrupt handling.
 */
int
pfe_hif_init(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;
	int err;

	PMD_INIT_FUNC_TRACE();

#if defined(LS1012A_PFE_RESET_WA)
	pfe_hif_rx_idle(hif);
#endif

	err = pfe_hif_alloc_descr(hif);
	if (err)
		goto err0;

	rte_spinlock_init(&hif->tx_lock);
	rte_spinlock_init(&hif->lock);

	gpi_enable(HGPI_BASE_ADDR);
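	/*
	 * Optional interrupt mode: open the PFE character device, ask the
	 * kernel to signal HIF interrupts on an eventfd, and register that
	 * fd with epoll for event-driven Rx.
	 */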
	if (getenv("PFE_INTR_SUPPORT")) {
		struct epoll_event epoll_ev;
		int event_fd = -1, epoll_fd, pfe_cdev_fd;

		pfe_cdev_fd = open(PFE_CDEV_PATH, O_RDWR);
		if (pfe_cdev_fd < 0) {
			PFE_PMD_WARN("Unable to open PFE device file (%s).",
				     PFE_CDEV_PATH);
			pfe->cdev_fd = PFE_CDEV_INVALID_FD;
			return -1;
		}
		pfe->cdev_fd = pfe_cdev_fd;

		event_fd = eventfd(0, EFD_NONBLOCK);
		if (event_fd < 0) {
			PFE_PMD_ERR("eventfd creation failed, err: %d", errno);
			err = -errno;
			goto err0;
		}
		/* Enable the HIF interrupt; it is signalled on event_fd */
		err = ioctl(pfe->cdev_fd, PFE_CDEV_HIF_INTR_EN, &event_fd);
		if (err) {
			PFE_PMD_ERR("ioctl failed for intr enable, err: %d",
					errno);
			goto err0;
		}
		epoll_fd = epoll_create(1);
		if (epoll_fd < 0) {
			PFE_PMD_ERR("epoll_create failed, err: %d", errno);
			err = -errno;
			goto err0;
		}
		epoll_ev.events = EPOLLIN | EPOLLPRI | EPOLLET;
		epoll_ev.data.fd = event_fd;
		err = epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &epoll_ev);
		if (err < 0) {
			PFE_PMD_ERR("epoll_ctl failed, err: %d", errno);
			goto err0;
		}
		pfe->hif.epoll_fd = epoll_fd;
	}
	return 0;
err0:
	return err;
}

/*
 * pfe_hif_exit
 * This function releases the HIF resources allocated by pfe_hif_init().
 */
void
pfe_hif_exit(struct pfe *pfe)
{
	struct pfe_hif *hif = &pfe->hif;

	PMD_INIT_FUNC_TRACE();

	rte_spinlock_lock(&hif->lock);
	/* Make sure all clients are disabled */
	hif->shm->g_client_status[0] = 0;
	hif->shm->g_client_status[1] = 0;

	rte_spinlock_unlock(&hif->lock);

	if (hif->setuped) {
#if defined(LS1012A_PFE_RESET_WA)
		pfe_hif_rx_idle(hif);
#endif
		/* Disable Rx/Tx */
		hif_rx_disable();
		hif_tx_disable();

		pfe_hif_free_descr(hif);
		pfe->hif.setuped = 0;
	}
	gpi_disable(HGPI_BASE_ADDR);
}