/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <string.h>
#include <unistd.h>
#include <dirent.h>
#include <fcntl.h>

#include <rte_bus.h>
#include <rte_bus_pci.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_mempool.h>
#include <rte_pci.h>

#include <rte_common.h>
#include <rte_rawdev.h>
#include <rte_rawdev_pmd.h>

#include "otx2_common.h"
#include "otx2_ep_enqdeq.h"

static void
sdp_dmazone_free(const struct rte_memzone *mz)
{
        const struct rte_memzone *mz_tmp;
        int ret = 0;

        if (mz == NULL) {
                /* Do not dereference mz->name here: mz is NULL */
                otx2_err("Memzone: NULL");
                return;
        }

        mz_tmp = rte_memzone_lookup(mz->name);
        if (mz_tmp == NULL) {
                otx2_err("Memzone %s not found", mz->name);
                return;
        }

        ret = rte_memzone_free(mz);
        if (ret)
                otx2_err("Memzone free failed : ret = %d", ret);
}

/* Free IQ resources */
int
sdp_delete_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
        struct sdp_instr_queue *iq;

        iq = sdpvf->instr_queue[iq_no];
        if (iq == NULL) {
                otx2_err("Invalid IQ[%d]", iq_no);
                return -ENOMEM;
        }

        rte_free(iq->req_list);
        iq->req_list = NULL;

        if (iq->iq_mz) {
                sdp_dmazone_free(iq->iq_mz);
                iq->iq_mz = NULL;
        }

        rte_free(sdpvf->instr_queue[iq_no]);
        sdpvf->instr_queue[iq_no] = NULL;

        sdpvf->num_iqs--;

        otx2_info("IQ[%d] is deleted", iq_no);

        return 0;
}

/* IQ initialization */
static int
sdp_init_instr_queue(struct sdp_device *sdpvf, int iq_no)
{
        const struct sdp_config *conf;
        struct sdp_instr_queue *iq;
        uint32_t q_size;

        conf = sdpvf->conf;
        iq = sdpvf->instr_queue[iq_no];
        q_size = conf->iq.instr_type * conf->num_iqdef_descs;

        /* IQ memory creation for instruction submission to OCTEON TX2 */
        iq->iq_mz = rte_memzone_reserve_aligned("iqmz",
                                        q_size,
                                        rte_socket_id(),
                                        RTE_MEMZONE_IOVA_CONTIG,
                                        RTE_CACHE_LINE_SIZE);
        if (iq->iq_mz == NULL) {
                otx2_err("IQ[%d] memzone alloc failed", iq_no);
                goto iq_init_fail;
        }

        iq->base_addr_dma = iq->iq_mz->iova;
        iq->base_addr = (uint8_t *)iq->iq_mz->addr;

        if (conf->num_iqdef_descs & (conf->num_iqdef_descs - 1)) {
                otx2_err("IQ[%d] descs not a power of 2", iq_no);
                goto iq_init_fail;
        }

        iq->nb_desc = conf->num_iqdef_descs;

        /* Create an IQ request list to hold requests that have been
         * posted to OCTEON TX2. This list is used to free the IQ
         * data buffer(s) later, once OCTEON TX2 has fetched the requests.
         */
        iq->req_list = rte_zmalloc_socket("request_list",
                        (iq->nb_desc * SDP_IQREQ_LIST_SIZE),
                        RTE_CACHE_LINE_SIZE,
                        rte_socket_id());
        if (iq->req_list == NULL) {
                otx2_err("IQ[%d] req_list alloc failed", iq_no);
                goto iq_init_fail;
        }

        otx2_info("IQ[%d]: base: %p basedma: %lx count: %d",
                     iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
                     iq->nb_desc);

        iq->sdp_dev = sdpvf;
        iq->q_no = iq_no;
        iq->fill_cnt = 0;
        iq->host_write_index = 0;
        iq->otx_read_index = 0;
        iq->flush_index = 0;

        /* Initialize the spinlocks for this instruction queue */
        rte_spinlock_init(&iq->lock);
        rte_spinlock_init(&iq->post_lock);

        rte_atomic64_clear(&iq->iq_flush_running);

        sdpvf->io_qmask.iq |= (1ull << iq_no);

        /* Set 32B/64B mode for each input queue */
        if (conf->iq.instr_type == 64)
                sdpvf->io_qmask.iq64B |= (1ull << iq_no);

        iq->iqcmd_64B = (conf->iq.instr_type == 64);

        /* Set up IQ registers */
        sdpvf->fn_list.setup_iq_regs(sdpvf, iq_no);

        return 0;

iq_init_fail:
        return -ENOMEM;
}

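/*
 * Worked sizing example (illustrative only; real values come from the
 * sdp_config attached to the device, and the 1024-descriptor default is
 * a hypothetical figure for this comment): with 64B instructions,
 *
 *      q_size   = 64 * 1024 = 65536 bytes (a 64 KB IQ memzone)
 *      req_list = 1024 * SDP_IQREQ_LIST_SIZE bytes
 *
 * The power-of-2 check above exists because the ring indices wrap with
 * modulo-nb_desc arithmetic (see sdp_incr_index()).
 */
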
int
sdp_setup_iqs(struct sdp_device *sdpvf, uint32_t iq_no)
{
        struct sdp_instr_queue *iq;

        iq = (struct sdp_instr_queue *)rte_zmalloc("sdp_IQ", sizeof(*iq),
                                                RTE_CACHE_LINE_SIZE);
        if (iq == NULL)
                return -ENOMEM;

        sdpvf->instr_queue[iq_no] = iq;

        if (sdp_init_instr_queue(sdpvf, iq_no)) {
                otx2_err("IQ init failed");
                goto delete_IQ;
        }
        otx2_info("IQ[%d] is created.", iq_no);

        sdpvf->num_iqs++;

        return 0;

delete_IQ:
        sdp_delete_iqs(sdpvf, iq_no);
        return -ENOMEM;
}

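/*
 * Usage sketch (illustrative, not driver code): a hypothetical probe
 * path would create queues in order and unwind with the delete helper
 * on failure. The queue count here is made up for the example.
 *
 *      uint32_t q;
 *
 *      for (q = 0; q < 2; q++) {
 *              if (sdp_setup_iqs(sdpvf, q)) {
 *                      while (q--)
 *                              sdp_delete_iqs(sdpvf, q);
 *                      return -ENOMEM;
 *              }
 *      }
 */
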
static void
sdp_droq_reset_indices(struct sdp_droq *droq)
{
        droq->read_idx  = 0;
        droq->write_idx = 0;
        droq->refill_idx = 0;
        droq->refill_count = 0;
        rte_atomic64_set(&droq->pkts_pending, 0);
}

static void
sdp_droq_destroy_ring_buffers(struct sdp_device *sdpvf,
                                struct sdp_droq *droq)
{
        uint32_t idx;

        for (idx = 0; idx < droq->nb_desc; idx++) {
                if (droq->recv_buf_list[idx].buffer) {
                        rte_mempool_put(sdpvf->enqdeq_mpool,
                                droq->recv_buf_list[idx].buffer);

                        droq->recv_buf_list[idx].buffer = NULL;
                }
        }

        sdp_droq_reset_indices(droq);
}

/* Free OQ resources */
int
sdp_delete_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
        struct sdp_droq *droq;

        droq = sdpvf->droq[oq_no];
        if (droq == NULL) {
                otx2_err("Invalid droq[%d]", oq_no);
                return -ENOMEM;
        }

        sdp_droq_destroy_ring_buffers(sdpvf, droq);
        rte_free(droq->recv_buf_list);
        droq->recv_buf_list = NULL;

        if (droq->info_mz) {
                sdp_dmazone_free(droq->info_mz);
                droq->info_mz = NULL;
        }

        if (droq->desc_ring_mz) {
                sdp_dmazone_free(droq->desc_ring_mz);
                droq->desc_ring_mz = NULL;
        }

        memset(droq, 0, SDP_DROQ_SIZE);

        rte_free(sdpvf->droq[oq_no]);
        sdpvf->droq[oq_no] = NULL;

        sdpvf->num_oqs--;

        otx2_info("OQ[%d] is deleted", oq_no);
        return 0;
}

static int
sdp_droq_setup_ring_buffers(struct sdp_device *sdpvf,
                struct sdp_droq *droq)
{
        struct sdp_droq_desc *desc_ring = droq->desc_ring;
        uint32_t idx;
        void *buf;

        for (idx = 0; idx < droq->nb_desc; idx++) {
                /* Check the mempool_get return value; on failure the
                 * output pointer is not written, so testing buf alone
                 * could read an uninitialized value.
                 */
                if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) || !buf) {
                        otx2_err("OQ buffer alloc failed");
                        droq->stats.rx_alloc_failure++;
                        /* Buffers posted so far are released by the
                         * sdp_delete_oqs() cleanup path.
                         */
                        return -ENOMEM;
                }

                droq->recv_buf_list[idx].buffer = buf;
                droq->info_list[idx].length = 0;

                /* Map ring buffers into memory */
                desc_ring[idx].info_ptr = (uint64_t)(droq->info_list_dma +
                        (idx * SDP_DROQ_INFO_SIZE));

                desc_ring[idx].buffer_ptr = rte_mem_virt2iova(buf);
        }

        sdp_droq_reset_indices(droq);

        return 0;
}

static void *
sdp_alloc_info_buffer(struct sdp_device *sdpvf __rte_unused,
        struct sdp_droq *droq)
{
        droq->info_mz = rte_memzone_reserve_aligned("OQ_info_list",
                                (droq->nb_desc * SDP_DROQ_INFO_SIZE),
                                rte_socket_id(),
                                RTE_MEMZONE_IOVA_CONTIG,
                                RTE_CACHE_LINE_SIZE);

        if (droq->info_mz == NULL)
                return NULL;

        droq->info_list_dma = droq->info_mz->iova;
        droq->info_alloc_size = droq->info_mz->len;
        droq->info_base_addr = (size_t)droq->info_mz->addr;

        return droq->info_mz->addr;
}

/* OQ initialization */
static int
sdp_init_droq(struct sdp_device *sdpvf, uint32_t q_no)
{
        const struct sdp_config *conf = sdpvf->conf;
        uint32_t c_refill_threshold;
        uint32_t desc_ring_size;
        struct sdp_droq *droq;

        otx2_info("OQ[%d] Init start", q_no);

        droq = sdpvf->droq[q_no];
        droq->sdp_dev = sdpvf;
        droq->q_no = q_no;

        c_refill_threshold = conf->oq.refill_threshold;
        droq->nb_desc      = conf->num_oqdef_descs;
        droq->buffer_size  = conf->oqdef_buf_size;

        /* OQ desc_ring set up */
        desc_ring_size = droq->nb_desc * SDP_DROQ_DESC_SIZE;
        droq->desc_ring_mz = rte_memzone_reserve_aligned("sdp_oqmz",
                                                desc_ring_size,
                                                rte_socket_id(),
                                                RTE_MEMZONE_IOVA_CONTIG,
                                                RTE_CACHE_LINE_SIZE);

        if (droq->desc_ring_mz == NULL) {
                otx2_err("OQ:%d desc_ring allocation failed", q_no);
                goto init_droq_fail;
        }

        droq->desc_ring_dma = droq->desc_ring_mz->iova;
        droq->desc_ring = (struct sdp_droq_desc *)droq->desc_ring_mz->addr;

        otx2_sdp_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx",
                    q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
        otx2_sdp_dbg("OQ[%d]: num_desc: %d", q_no, droq->nb_desc);

        /* OQ info_list set up */
        droq->info_list = sdp_alloc_info_buffer(sdpvf, droq);
        if (droq->info_list == NULL) {
                otx2_err("memory allocation failed for OQ[%d] info_list", q_no);
                goto init_droq_fail;
        }

        /* OQ buf_list set up */
        droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
                                (droq->nb_desc * SDP_DROQ_RECVBUF_SIZE),
                                 RTE_CACHE_LINE_SIZE, rte_socket_id());
        if (droq->recv_buf_list == NULL) {
                otx2_err("OQ recv_buf_list alloc failed");
                goto init_droq_fail;
        }

        if (sdp_droq_setup_ring_buffers(sdpvf, droq))
                goto init_droq_fail;

        droq->refill_threshold = c_refill_threshold;
        rte_spinlock_init(&droq->lock);

        /* Set up OQ registers */
        sdpvf->fn_list.setup_oq_regs(sdpvf, q_no);

        sdpvf->io_qmask.oq |= (1ull << q_no);

        return 0;

init_droq_fail:
        return -ENOMEM;
}

/* OQ configuration and setup */
int
sdp_setup_oqs(struct sdp_device *sdpvf, uint32_t oq_no)
{
        struct sdp_droq *droq;

        /* Allocate new droq. */
        droq = (struct sdp_droq *)rte_zmalloc("sdp_OQ",
                                sizeof(*droq), RTE_CACHE_LINE_SIZE);
        if (droq == NULL) {
                otx2_err("Droq[%d] creation failed", oq_no);
                return -ENOMEM;
        }
        sdpvf->droq[oq_no] = droq;

        if (sdp_init_droq(sdpvf, oq_no)) {
                otx2_err("Droq[%d] initialization failed", oq_no);
                goto delete_OQ;
        }
        otx2_info("OQ[%d] is created.", oq_no);

        sdpvf->num_oqs++;

        return 0;

delete_OQ:
        sdp_delete_oqs(sdpvf, oq_no);
        return -ENOMEM;
}

static inline void
sdp_iqreq_delete(struct sdp_device *sdpvf,
                struct sdp_instr_queue *iq, uint32_t idx)
{
        uint32_t reqtype;
        void *buf;

        buf     = iq->req_list[idx].buf;
        reqtype = iq->req_list[idx].reqtype;

        switch (reqtype) {
        case SDP_REQTYPE_NORESP:
                rte_mempool_put(sdpvf->enqdeq_mpool, buf);
                otx2_sdp_dbg("IQ buffer freed at idx[%d]", idx);
                break;

        case SDP_REQTYPE_NORESP_GATHER:
        case SDP_REQTYPE_NONE:
        default:
                otx2_info("This iqreq mode is not supported: %d", reqtype);
                break;
        }

        /* Reset the request list at this index */
        iq->req_list[idx].buf = NULL;
        iq->req_list[idx].reqtype = 0;
}

static inline void
sdp_iqreq_add(struct sdp_instr_queue *iq, void *buf,
                uint32_t reqtype)
{
        iq->req_list[iq->host_write_index].buf = buf;
        iq->req_list[iq->host_write_index].reqtype = reqtype;

        otx2_sdp_dbg("IQ buffer added at idx[%d]", iq->host_write_index);
}

static void
sdp_flush_iq(struct sdp_device *sdpvf,
                struct sdp_instr_queue *iq,
                uint32_t pending_thresh __rte_unused)
{
        uint32_t instr_processed = 0;

        rte_spinlock_lock(&iq->lock);

        iq->otx_read_index = sdpvf->fn_list.update_iq_read_idx(iq);
        while (iq->flush_index != iq->otx_read_index) {
                /* Free the IQ data buffer to the pool */
                sdp_iqreq_delete(sdpvf, iq, iq->flush_index);
                iq->flush_index =
                        sdp_incr_index(iq->flush_index, 1, iq->nb_desc);

                instr_processed++;
        }

        iq->stats.instr_processed = instr_processed;
        rte_atomic64_sub(&iq->instr_pending, instr_processed);

        rte_spinlock_unlock(&iq->lock);
}

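/*
 * Index-wrap sketch (illustrative; sdp_incr_index() is defined in the
 * header, this comment only assumes it advances an index modulo the
 * ring size). With a hypothetical 4-entry ring:
 *
 *      flush_index = 3;
 *      flush_index = sdp_incr_index(flush_index, 1, 4);   // -> 0
 *
 * The power-of-2 descriptor-count requirement enforced at IQ init keeps
 * this wrap cheap to compute.
 */
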
static inline void
sdp_ring_doorbell(struct sdp_device *sdpvf __rte_unused,
                struct sdp_instr_queue *iq)
{
        otx2_write64(iq->fill_cnt, iq->doorbell_reg);

        /* Make sure the doorbell writes are observed by HW */
        rte_cio_wmb();
        iq->fill_cnt = 0;
}

static inline int
post_iqcmd(struct sdp_instr_queue *iq, uint8_t *iqcmd)
{
        uint8_t *iqptr, cmdsize;

        /* This ensures that the read index does not wrap around to
         * the same position if the queue gets full before OCTEON TX2
         * could fetch any instr.
         */
        if (rte_atomic64_read(&iq->instr_pending) >=
                              (int32_t)(iq->nb_desc - 1)) {
                otx2_err("IQ is full, pending:%ld",
                         (long)rte_atomic64_read(&iq->instr_pending));

                return SDP_IQ_SEND_FAILED;
        }

        /* Copy cmd into iq */
        cmdsize = ((iq->iqcmd_64B) ? 64 : 32);
        iqptr   = iq->base_addr + (cmdsize * iq->host_write_index);

        rte_memcpy(iqptr, iqcmd, cmdsize);

        otx2_sdp_dbg("IQ cmd posted @ index:%d", iq->host_write_index);

        /* Increment the host write index */
        iq->host_write_index =
                sdp_incr_index(iq->host_write_index, 1, iq->nb_desc);

        iq->fill_cnt++;

        /* Flush the command into memory. We need to be sure the data
         * is in memory before indicating that the instruction is
         * pending.
         */
        rte_smp_wmb();
        rte_atomic64_inc(&iq->instr_pending);

        /* SDP_IQ_SEND_SUCCESS */
        return 0;
}

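/*
 * Offset arithmetic sketch (illustrative only): in 64B mode, command i
 * lands at base_addr + 64 * i, so for a hypothetical host_write_index
 * of 5 the copy above targets byte offset 320 of the IQ memzone; in
 * 32B mode the stride is 32. The memzone reserved at IQ init is exactly
 * instr_type * nb_desc bytes, so every index maps inside it.
 */
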
static int
sdp_send_data(struct sdp_device *sdpvf,
              struct sdp_instr_queue *iq, void *cmd)
{
        int ret;

        /* Lock this IQ command queue before posting instruction */
        rte_spinlock_lock(&iq->post_lock);

        /* Submit IQ command */
        ret = post_iqcmd(iq, cmd);

        if (ret == SDP_IQ_SEND_SUCCESS) {
                sdp_ring_doorbell(sdpvf, iq);

                iq->stats.instr_posted++;
                otx2_sdp_dbg("Instr submit success, posted: %ld",
                             (long)iq->stats.instr_posted);
        } else {
                iq->stats.instr_dropped++;
                otx2_err("Instr submit failed, dropped: %ld",
                         (long)iq->stats.instr_dropped);
        }

        rte_spinlock_unlock(&iq->post_lock);

        return ret;
}

/* Enqueue requests/packets to SDP IQ queue.
 * Returns the number of requests enqueued successfully.
 */
int
sdp_rawdev_enqueue(struct rte_rawdev *rawdev,
                   struct rte_rawdev_buf **buffers __rte_unused,
                   unsigned int count, rte_rawdev_obj_t context)
{
        struct sdp_instr_64B *iqcmd;
        struct sdp_instr_queue *iq;
        struct sdp_soft_instr *si;
        struct sdp_device *sdpvf;

        struct sdp_instr_ih ihx;

        sdpvf = (struct sdp_device *)rawdev->dev_private;
        si = (struct sdp_soft_instr *)context;

        iq = sdpvf->instr_queue[si->q_no];

        /* Only a single request per call is supported */
        if (count != 1) {
                otx2_err("This mode is not supported: req[%d]", count);
                goto enq_fail;
        }

        memset(&ihx, 0, sizeof(struct sdp_instr_ih));

        iqcmd = &si->command;
        memset(iqcmd, 0, sizeof(struct sdp_instr_64B));

        iqcmd->dptr = (uint64_t)si->dptr;

        /* Populate SDP IH */
        ihx.pkind  = sdpvf->pkind;
        ihx.fsz    = si->ih.fsz + 8; /* 8B for NIX IH */
        ihx.gather = si->ih.gather;

        switch (ihx.gather) {
        case 0: /* Direct data instr */
                ihx.tlen = si->ih.tlen + ihx.fsz;
                break;

        default: /* Gather */
                switch (si->ih.gsz) {
                case 0: /* Direct gather instr */
                        otx2_err("Direct gather instr: not supported");
                        goto enq_fail;

                default: /* Indirect gather instr */
                        otx2_err("Indirect gather instr: not supported");
                        goto enq_fail;
                }
        }

        rte_memcpy(&iqcmd->ih, &ihx, sizeof(uint64_t));
        iqcmd->rptr = (uint64_t)si->rptr;
        rte_memcpy(&iqcmd->irh, &si->irh, sizeof(uint64_t));

        /* Swap FSZ (front data) here, to avoid swapping on OCTEON TX2 side */
        sdp_swap_8B_data(&iqcmd->rptr, 1);
        sdp_swap_8B_data(&iqcmd->irh, 1);

        otx2_sdp_dbg("After swapping");
        otx2_sdp_dbg("Word0 [dptr]: 0x%016lx", (unsigned long)iqcmd->dptr);
        otx2_sdp_dbg("Word1 [ihtx]: 0x%016lx", (unsigned long)iqcmd->ih);
        otx2_sdp_dbg("Word2 [rptr]: 0x%016lx", (unsigned long)iqcmd->rptr);
        otx2_sdp_dbg("Word3 [irh]: 0x%016lx", (unsigned long)iqcmd->irh);
        otx2_sdp_dbg("Word4 [exhdr[0]]: 0x%016lx",
                        (unsigned long)iqcmd->exhdr[0]);

        sdp_iqreq_add(iq, si->dptr, si->reqtype);

        if (sdp_send_data(sdpvf, iq, iqcmd)) {
                otx2_err("Data send failed");
                sdp_iqreq_delete(sdpvf, iq, iq->host_write_index);
                goto enq_fail;
        }

        if (rte_atomic64_read(&iq->instr_pending) >= 1)
                sdp_flush_iq(sdpvf, iq, 1 /* (iq->nb_desc / 2) */);

        /* Return the number of instructions posted successfully. */
        return count;

enq_fail:
        return SDP_IQ_SEND_FAILED;
}

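/*
 * Caller sketch (illustrative; any sdp_soft_instr fields beyond those
 * referenced above are assumptions, and the device handles are
 * hypothetical). The driver expects exactly one request per call,
 * carried through the context pointer rather than the buffers array:
 *
 *      struct sdp_soft_instr si;
 *      void *buf;
 *
 *      memset(&si, 0, sizeof(si));
 *      si.q_no = 0;
 *      si.reqtype = SDP_REQTYPE_NORESP;
 *      rte_mempool_get(sdpvf->enqdeq_mpool, &buf);
 *      si.dptr = buf;
 *      // ... fill si.ih / si.irh and the data buffer ...
 *
 *      if (sdp_rawdev_enqueue(rawdev, NULL, 1, &si) != 1)
 *              ; // request was dropped; reclaim buf
 */
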
static uint32_t
sdp_droq_refill(struct sdp_device *sdpvf, struct sdp_droq *droq)
{
        struct sdp_droq_desc *desc_ring;
        uint32_t desc_refilled = 0;
        void *buf = NULL;

        desc_ring = droq->desc_ring;

        while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
                /* If a valid buffer exists (happens if there is no dispatch),
                 * reuse the buffer, else allocate.
                 */
                if (droq->recv_buf_list[droq->refill_idx].buffer != NULL)
                        break;

                /* Check the return value: on failure the output pointer
                 * is left untouched, so buf alone is not a reliable
                 * indicator after the first iteration. If a buffer could
                 * not be allocated, there is no point in continuing.
                 */
                if (rte_mempool_get(sdpvf->enqdeq_mpool, &buf) || !buf) {
                        droq->stats.rx_alloc_failure++;
                        break;
                }

                droq->recv_buf_list[droq->refill_idx].buffer = buf;
                desc_ring[droq->refill_idx].buffer_ptr = rte_mem_virt2iova(buf);

                /* Reset any previous values in the length field. */
                droq->info_list[droq->refill_idx].length = 0;

                droq->refill_idx = sdp_incr_index(droq->refill_idx, 1,
                                droq->nb_desc);

                desc_refilled++;
                droq->refill_count--;
        }

        return desc_refilled;
}

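/*
 * Refill accounting sketch (illustrative numbers): if 8 packets have
 * been consumed since the last refill, refill_count is 8; a pass of the
 * loop above in which every allocation succeeds returns desc_refilled
 * == 8, and the caller writes that value to pkts_credit_reg, so the
 * hardware regains exactly the descriptors the host replenished.
 */
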
static int
sdp_droq_read_packet(struct sdp_device *sdpvf __rte_unused,
                     struct sdp_droq *droq,
                     struct sdp_droq_pkt *droq_pkt)
{
        struct sdp_droq_info *info;
        uint32_t total_len = 0;
        uint32_t pkt_len = 0;

        info = &droq->info_list[droq->read_idx];
        sdp_swap_8B_data((uint64_t *)&info->length, 1);
        if (!info->length) {
                otx2_err("OQ info_list->length[%ld]", (long)info->length);
                goto oq_read_fail;
        }

        /* Deduce the actual data size */
        info->length -= SDP_RH_SIZE;
        total_len += (uint32_t)info->length;

        otx2_sdp_dbg("OQ: pkt_len[%ld], buffer_size %d",
                        (long)info->length, droq->buffer_size);
        if (info->length > droq->buffer_size) {
                otx2_err("This mode is not supported: pkt_len > buffer_size");
                goto oq_read_fail;
        }

        /* Single-buffer packet: hand the buffer to the application and
         * mark the slot for refill.
         */
        pkt_len = (uint32_t)info->length;
        droq_pkt->data = droq->recv_buf_list[droq->read_idx].buffer;
        droq_pkt->len  = pkt_len;

        droq->recv_buf_list[droq->read_idx].buffer = NULL;
        droq->read_idx = sdp_incr_index(droq->read_idx, 1, /* count */
                                        droq->nb_desc /* max rd idx */);
        droq->refill_count++;

        info->length = 0;

        return SDP_OQ_RECV_SUCCESS;

oq_read_fail:
        return SDP_OQ_RECV_FAILED;
}

static inline uint32_t
sdp_check_droq_pkts(struct sdp_droq *droq, uint32_t burst_size)
{
        uint32_t min_pkts = 0;
        uint32_t new_pkts;
        uint32_t pkt_count;

        /* Latest available OQ packets */
        pkt_count = rte_read32(droq->pkts_sent_reg);

        /* Newly arrived packets */
        new_pkts = pkt_count - droq->last_pkt_count;
        otx2_sdp_dbg("Recvd [%d] new OQ pkts", new_pkts);

        min_pkts = (new_pkts > burst_size) ? burst_size : new_pkts;
        if (min_pkts) {
                rte_atomic64_add(&droq->pkts_pending, min_pkts);
                /* Back up the aggregated packet count so far */
                droq->last_pkt_count += min_pkts;
        }

        return min_pkts;
}

/* Check for response arrival from OCTEON TX2.
 * Returns the number of requests completed.
 */
int
sdp_rawdev_dequeue(struct rte_rawdev *rawdev,
                   struct rte_rawdev_buf **buffers, unsigned int count,
                   rte_rawdev_obj_t context __rte_unused)
{
        struct sdp_droq_pkt *oq_pkt;
        struct sdp_device *sdpvf;
        struct sdp_droq *droq;

        uint32_t q_no = 0, pkts;
        uint32_t new_pkts;
        uint32_t ret;

        sdpvf = (struct sdp_device *)rawdev->dev_private;

        droq = sdpvf->droq[q_no];
        if (!droq) {
                /* Return directly: the lock below has not been taken yet,
                 * and droq cannot be dereferenced on the deq_fail path.
                 */
                otx2_err("Invalid droq[%d]", q_no);
                return SDP_OQ_RECV_FAILED;
        }

        /* Grab the lock */
        rte_spinlock_lock(&droq->lock);

        new_pkts = sdp_check_droq_pkts(droq, count);
        if (!new_pkts) {
                otx2_sdp_dbg("Zero new_pkts:%d", new_pkts);
                goto deq_fail; /* No pkts at this moment */
        }

        otx2_sdp_dbg("Received new_pkts = %d", new_pkts);

        for (pkts = 0; pkts < new_pkts; pkts++) {
                /* Push the received pkt to application */
                oq_pkt = (struct sdp_droq_pkt *)buffers[pkts];

                ret = sdp_droq_read_packet(sdpvf, droq, oq_pkt);
                if (ret) {
                        otx2_err("DROQ read packet failed.");
                        goto deq_fail;
                }

                /* Stats */
                droq->stats.pkts_received++;
                droq->stats.bytes_received += oq_pkt->len;
        }

        /* Ack the h/w with the number of pkts read by the host */
        rte_write32(pkts, droq->pkts_sent_reg);
        rte_cio_wmb();

        droq->last_pkt_count -= pkts;

        otx2_sdp_dbg("DROQ pkts[%d] pushed to application", pkts);

        /* Refill DROQ buffers */
        if (droq->refill_count >= 2 /* droq->refill_threshold */) {
                int desc_refilled = sdp_droq_refill(sdpvf, droq);

                /* Flush the droq descriptor data to memory to be sure
                 * that when we update the credits the data in memory is
                 * accurate.
                 */
                rte_write32(desc_refilled, droq->pkts_credit_reg);

                /* Ensure the mmio write completes */
                rte_wmb();
                otx2_sdp_dbg("Refilled count = %d", desc_refilled);
        }

        /* Release the spin lock */
        rte_spinlock_unlock(&droq->lock);

        return pkts;

deq_fail:
        rte_spinlock_unlock(&droq->lock);
        return SDP_OQ_RECV_FAILED;
}
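
/*
 * Caller sketch (illustrative; the burst size and buffer setup are made
 * up for the example). The application supplies sdp_droq_pkt slots
 * through the rawdev buffer array and polls until packets arrive:
 *
 *      struct sdp_droq_pkt pkt_buf[16];
 *      struct rte_rawdev_buf *bufs[16];
 *      int i, n;
 *
 *      for (i = 0; i < 16; i++)
 *              bufs[i] = (struct rte_rawdev_buf *)&pkt_buf[i];
 *
 *      n = sdp_rawdev_dequeue(rawdev, bufs, 16, NULL);
 *      if (n == SDP_OQ_RECV_FAILED)
 *              n = 0;
 *      for (i = 0; i < n; i++)
 *              ; // consume pkt_buf[i].data / pkt_buf[i].len
 */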