981147eff0889da155a16b3fb50e520250678463
[dpdk.git] / drivers / net / octeontx_ep / otx_ep_rxtx.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4
5 #include <unistd.h>
6
7 #include <rte_eal.h>
8 #include <rte_mempool.h>
9 #include <rte_mbuf.h>
10 #include <rte_io.h>
11 #include <rte_net.h>
12 #include <ethdev_pci.h>
13
14 #include "otx_ep_common.h"
15 #include "otx_ep_vf.h"
16 #include "otx2_ep_vf.h"
17 #include "otx_ep_rxtx.h"
18
19 /* SDP_LENGTH_S specifies packet length and is of 8-byte size */
20 #define INFO_SIZE 8
21 #define DROQ_REFILL_THRESHOLD 16
22
23 static void
24 otx_ep_dmazone_free(const struct rte_memzone *mz)
25 {
26         const struct rte_memzone *mz_tmp;
27         int ret = 0;
28
29         if (mz == NULL) {
30                 otx_ep_err("Memzone %s : NULL\n", mz->name);
31                 return;
32         }
33
34         mz_tmp = rte_memzone_lookup(mz->name);
35         if (mz_tmp == NULL) {
36                 otx_ep_err("Memzone %s Not Found\n", mz->name);
37                 return;
38         }
39
40         ret = rte_memzone_free(mz);
41         if (ret)
42                 otx_ep_err("Memzone free failed : ret = %d\n", ret);
43 }
44
45 /* Free IQ resources */
46 int
47 otx_ep_delete_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no)
48 {
49         struct otx_ep_instr_queue *iq;
50
51         iq = otx_ep->instr_queue[iq_no];
52         if (iq == NULL) {
53                 otx_ep_err("Invalid IQ[%d]\n", iq_no);
54                 return -EINVAL;
55         }
56
57         rte_free(iq->req_list);
58         iq->req_list = NULL;
59
60         if (iq->iq_mz) {
61                 otx_ep_dmazone_free(iq->iq_mz);
62                 iq->iq_mz = NULL;
63         }
64
65         rte_free(otx_ep->instr_queue[iq_no]);
66         otx_ep->instr_queue[iq_no] = NULL;
67
68         otx_ep->nb_tx_queues--;
69
70         otx_ep_info("IQ[%d] is deleted\n", iq_no);
71
72         return 0;
73 }
74
75 /* IQ initialization */
76 static int
77 otx_ep_init_instr_queue(struct otx_ep_device *otx_ep, int iq_no, int num_descs,
78                      unsigned int socket_id)
79 {
80         const struct otx_ep_config *conf;
81         struct otx_ep_instr_queue *iq;
82         uint32_t q_size;
83
84         conf = otx_ep->conf;
85         iq = otx_ep->instr_queue[iq_no];
86         q_size = conf->iq.instr_type * num_descs;
87
88         /* IQ memory creation for Instruction submission to OCTEON TX2 */
89         iq->iq_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev,
90                                              "instr_queue", iq_no, q_size,
91                                              OTX_EP_PCI_RING_ALIGN,
92                                              socket_id);
93         if (iq->iq_mz == NULL) {
94                 otx_ep_err("IQ[%d] memzone alloc failed\n", iq_no);
95                 goto iq_init_fail;
96         }
97
98         iq->base_addr_dma = iq->iq_mz->iova;
99         iq->base_addr = (uint8_t *)iq->iq_mz->addr;
100
101         if (num_descs & (num_descs - 1)) {
102                 otx_ep_err("IQ[%d] descs not in power of 2\n", iq_no);
103                 goto iq_init_fail;
104         }
105
106         iq->nb_desc = num_descs;
107
108         /* Create a IQ request list to hold requests that have been
109          * posted to OCTEON TX2. This list will be used for freeing the IQ
110          * data buffer(s) later once the OCTEON TX2 fetched the requests.
111          */
112         iq->req_list = rte_zmalloc_socket("request_list",
113                         (iq->nb_desc * OTX_EP_IQREQ_LIST_SIZE),
114                         RTE_CACHE_LINE_SIZE,
115                         rte_socket_id());
116         if (iq->req_list == NULL) {
117                 otx_ep_err("IQ[%d] req_list alloc failed\n", iq_no);
118                 goto iq_init_fail;
119         }
120
121         otx_ep_info("IQ[%d]: base: %p basedma: %lx count: %d\n",
122                      iq_no, iq->base_addr, (unsigned long)iq->base_addr_dma,
123                      iq->nb_desc);
124
125         iq->otx_ep_dev = otx_ep;
126         iq->q_no = iq_no;
127         iq->fill_cnt = 0;
128         iq->host_write_index = 0;
129         iq->otx_read_index = 0;
130         iq->flush_index = 0;
131         iq->instr_pending = 0;
132
133         otx_ep->io_qmask.iq |= (1ull << iq_no);
134
135         /* Set 32B/64B mode for each input queue */
136         if (conf->iq.instr_type == 64)
137                 otx_ep->io_qmask.iq64B |= (1ull << iq_no);
138
139         iq->iqcmd_64B = (conf->iq.instr_type == 64);
140
141         /* Set up IQ registers */
142         otx_ep->fn_list.setup_iq_regs(otx_ep, iq_no);
143
144         return 0;
145
146 iq_init_fail:
147         return -ENOMEM;
148 }
149
150 int
151 otx_ep_setup_iqs(struct otx_ep_device *otx_ep, uint32_t iq_no, int num_descs,
152                  unsigned int socket_id)
153 {
154         struct otx_ep_instr_queue *iq;
155
156         iq = (struct otx_ep_instr_queue *)rte_zmalloc("otx_ep_IQ", sizeof(*iq),
157                                                 RTE_CACHE_LINE_SIZE);
158         if (iq == NULL)
159                 return -ENOMEM;
160
161         otx_ep->instr_queue[iq_no] = iq;
162
163         if (otx_ep_init_instr_queue(otx_ep, iq_no, num_descs, socket_id)) {
164                 otx_ep_err("IQ init is failed\n");
165                 goto delete_IQ;
166         }
167         otx_ep->nb_tx_queues++;
168
169         otx_ep_info("IQ[%d] is created.\n", iq_no);
170
171         return 0;
172
173 delete_IQ:
174         otx_ep_delete_iqs(otx_ep, iq_no);
175         return -ENOMEM;
176 }
177
178 static void
179 otx_ep_droq_reset_indices(struct otx_ep_droq *droq)
180 {
181         droq->read_idx  = 0;
182         droq->write_idx = 0;
183         droq->refill_idx = 0;
184         droq->refill_count = 0;
185         droq->last_pkt_count = 0;
186         droq->pkts_pending = 0;
187 }
188
189 static void
190 otx_ep_droq_destroy_ring_buffers(struct otx_ep_droq *droq)
191 {
192         uint32_t idx;
193
194         for (idx = 0; idx < droq->nb_desc; idx++) {
195                 if (droq->recv_buf_list[idx]) {
196                         rte_pktmbuf_free(droq->recv_buf_list[idx]);
197                         droq->recv_buf_list[idx] = NULL;
198                 }
199         }
200
201         otx_ep_droq_reset_indices(droq);
202 }
203
204 /* Free OQs resources */
205 int
206 otx_ep_delete_oqs(struct otx_ep_device *otx_ep, uint32_t oq_no)
207 {
208         struct otx_ep_droq *droq;
209
210         droq = otx_ep->droq[oq_no];
211         if (droq == NULL) {
212                 otx_ep_err("Invalid droq[%d]\n", oq_no);
213                 return -EINVAL;
214         }
215
216         otx_ep_droq_destroy_ring_buffers(droq);
217         rte_free(droq->recv_buf_list);
218         droq->recv_buf_list = NULL;
219
220         if (droq->desc_ring_mz) {
221                 otx_ep_dmazone_free(droq->desc_ring_mz);
222                 droq->desc_ring_mz = NULL;
223         }
224
225         memset(droq, 0, OTX_EP_DROQ_SIZE);
226
227         rte_free(otx_ep->droq[oq_no]);
228         otx_ep->droq[oq_no] = NULL;
229
230         otx_ep->nb_rx_queues--;
231
232         otx_ep_info("OQ[%d] is deleted\n", oq_no);
233         return 0;
234 }
235
236 static int
237 otx_ep_droq_setup_ring_buffers(struct otx_ep_droq *droq)
238 {
239         struct otx_ep_droq_desc *desc_ring = droq->desc_ring;
240         struct otx_ep_droq_info *info;
241         struct rte_mbuf *buf;
242         uint32_t idx;
243
244         for (idx = 0; idx < droq->nb_desc; idx++) {
245                 buf = rte_pktmbuf_alloc(droq->mpool);
246                 if (buf == NULL) {
247                         otx_ep_err("OQ buffer alloc failed\n");
248                         droq->stats.rx_alloc_failure++;
249                         return -ENOMEM;
250                 }
251
252                 droq->recv_buf_list[idx] = buf;
253                 info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
254                 memset(info, 0, sizeof(*info));
255                 desc_ring[idx].buffer_ptr = rte_mbuf_data_iova_default(buf);
256         }
257
258         otx_ep_droq_reset_indices(droq);
259
260         return 0;
261 }
262
263 /* OQ initialization */
264 static int
265 otx_ep_init_droq(struct otx_ep_device *otx_ep, uint32_t q_no,
266               uint32_t num_descs, uint32_t desc_size,
267               struct rte_mempool *mpool, unsigned int socket_id)
268 {
269         const struct otx_ep_config *conf = otx_ep->conf;
270         uint32_t c_refill_threshold;
271         struct otx_ep_droq *droq;
272         uint32_t desc_ring_size;
273
274         otx_ep_info("OQ[%d] Init start\n", q_no);
275
276         droq = otx_ep->droq[q_no];
277         droq->otx_ep_dev = otx_ep;
278         droq->q_no = q_no;
279         droq->mpool = mpool;
280
281         droq->nb_desc      = num_descs;
282         droq->buffer_size  = desc_size;
283         c_refill_threshold = RTE_MAX(conf->oq.refill_threshold,
284                                      droq->nb_desc / 2);
285
286         /* OQ desc_ring set up */
287         desc_ring_size = droq->nb_desc * OTX_EP_DROQ_DESC_SIZE;
288         droq->desc_ring_mz = rte_eth_dma_zone_reserve(otx_ep->eth_dev, "droq",
289                                                       q_no, desc_ring_size,
290                                                       OTX_EP_PCI_RING_ALIGN,
291                                                       socket_id);
292
293         if (droq->desc_ring_mz == NULL) {
294                 otx_ep_err("OQ:%d desc_ring allocation failed\n", q_no);
295                 goto init_droq_fail;
296         }
297
298         droq->desc_ring_dma = droq->desc_ring_mz->iova;
299         droq->desc_ring = (struct otx_ep_droq_desc *)droq->desc_ring_mz->addr;
300
301         otx_ep_dbg("OQ[%d]: desc_ring: virt: 0x%p, dma: %lx\n",
302                     q_no, droq->desc_ring, (unsigned long)droq->desc_ring_dma);
303         otx_ep_dbg("OQ[%d]: num_desc: %d\n", q_no, droq->nb_desc);
304
305         /* OQ buf_list set up */
306         droq->recv_buf_list = rte_zmalloc_socket("recv_buf_list",
307                                 (droq->nb_desc * sizeof(struct rte_mbuf *)),
308                                  RTE_CACHE_LINE_SIZE, socket_id);
309         if (droq->recv_buf_list == NULL) {
310                 otx_ep_err("OQ recv_buf_list alloc failed\n");
311                 goto init_droq_fail;
312         }
313
314         if (otx_ep_droq_setup_ring_buffers(droq))
315                 goto init_droq_fail;
316
317         droq->refill_threshold = c_refill_threshold;
318
319         /* Set up OQ registers */
320         otx_ep->fn_list.setup_oq_regs(otx_ep, q_no);
321
322         otx_ep->io_qmask.oq |= (1ull << q_no);
323
324         return 0;
325
326 init_droq_fail:
327         return -ENOMEM;
328 }
329
330 /* OQ configuration and setup */
331 int
332 otx_ep_setup_oqs(struct otx_ep_device *otx_ep, int oq_no, int num_descs,
333                  int desc_size, struct rte_mempool *mpool,
334                  unsigned int socket_id)
335 {
336         struct otx_ep_droq *droq;
337
338         /* Allocate new droq. */
339         droq = (struct otx_ep_droq *)rte_zmalloc("otx_ep_OQ",
340                                 sizeof(*droq), RTE_CACHE_LINE_SIZE);
341         if (droq == NULL) {
342                 otx_ep_err("Droq[%d] Creation Failed\n", oq_no);
343                 return -ENOMEM;
344         }
345         otx_ep->droq[oq_no] = droq;
346
347         if (otx_ep_init_droq(otx_ep, oq_no, num_descs, desc_size, mpool,
348                              socket_id)) {
349                 otx_ep_err("Droq[%d] Initialization failed\n", oq_no);
350                 goto delete_OQ;
351         }
352         otx_ep_info("OQ[%d] is created.\n", oq_no);
353
354         otx_ep->nb_rx_queues++;
355
356         return 0;
357
358 delete_OQ:
359         otx_ep_delete_oqs(otx_ep, oq_no);
360         return -ENOMEM;
361 }
362
/* Re-arm consumed OQ descriptors with fresh mbufs.
 *
 * Walks the ring from refill_idx, allocating one mbuf per consumed
 * descriptor, until refill_count is exhausted, allocation fails, or a
 * still-valid buffer is reached. Returns the number of descriptors
 * refilled; the caller is responsible for ringing the credit doorbell.
 */
static uint32_t
otx_ep_droq_refill(struct otx_ep_droq *droq)
{
	struct otx_ep_droq_desc *desc_ring;
	struct otx_ep_droq_info *info;
	struct rte_mbuf *buf = NULL;
	uint32_t desc_refilled = 0;

	desc_ring = droq->desc_ring;

	while (droq->refill_count && (desc_refilled < droq->nb_desc)) {
		/* A non-NULL entry still holds a valid buffer (happens
		 * if there was no dispatch); stop here — that buffer is
		 * reused as-is rather than replaced.
		 */
		if (droq->recv_buf_list[droq->refill_idx] != NULL)
			break;

		buf = rte_pktmbuf_alloc(droq->mpool);
		/* If a buffer could not be allocated, no point in
		 * continuing
		 */
		if (buf == NULL) {
			droq->stats.rx_alloc_failure++;
			break;
		}
		/* Zero the info header; hardware fills it on receive. */
		info = rte_pktmbuf_mtod(buf, struct otx_ep_droq_info *);
		memset(info, 0, sizeof(*info));

		/* Post the new buffer to both the host-side table and
		 * the hardware descriptor.
		 */
		droq->recv_buf_list[droq->refill_idx] = buf;
		desc_ring[droq->refill_idx].buffer_ptr =
					rte_mbuf_data_iova_default(buf);


		droq->refill_idx = otx_ep_incr_index(droq->refill_idx, 1,
				droq->nb_desc);

		desc_refilled++;
		droq->refill_count--;
	}

	return desc_refilled;
}
405
/* Extract one received packet from the OQ ring.
 *
 * Handles both single-buffer packets and packets scattered across
 * multiple ring buffers (chained into one multi-segment mbuf). On
 * success the function advances read_idx past every consumed buffer,
 * bumps refill_count accordingly, fills in ptype/l2/l3/l4 metadata and
 * returns the packet. Returns NULL when the info header never became
 * valid (zero-length drop), when the frame exceeds the non-jumbo limit
 * without DEV_RX_OFFLOAD_JUMBO_FRAME, or when it is multi-segment
 * without DEV_RX_OFFLOAD_SCATTER.
 *
 * @next_fetch: non-zero to prefetch the next descriptor's info header
 *              (hides memory latency for the following call).
 */
static struct rte_mbuf *
otx_ep_droq_read_packet(struct otx_ep_device *otx_ep,
			struct otx_ep_droq *droq, int next_fetch)
{
	volatile struct otx_ep_droq_info *info;
	struct rte_mbuf *droq_pkt2 = NULL;
	struct rte_mbuf *droq_pkt = NULL;
	struct rte_net_hdr_lens hdr_lens;
	struct otx_ep_droq_info *info2;
	uint64_t total_pkt_len;
	uint32_t pkt_len = 0;
	int next_idx;

	droq_pkt  = droq->recv_buf_list[droq->read_idx];
	droq_pkt2  = droq->recv_buf_list[droq->read_idx];
	info = rte_pktmbuf_mtod(droq_pkt, struct otx_ep_droq_info *);
	/* make sure info is available */
	rte_rmb();
	if (unlikely(!info->length)) {
		/* The hardware DMA of the info header may not have
		 * landed yet; spin a bounded number of iterations
		 * waiting for length to become non-zero.
		 */
		int retry = OTX_EP_MAX_DELAYED_PKT_RETRIES;
		/* otx_ep_dbg("OCTEON DROQ[%d]: read_idx: %d; Data not ready "
		 * "yet, Retry; pending=%lu\n", droq->q_no, droq->read_idx,
		 * droq->pkts_pending);
		 */
		droq->stats.pkts_delayed_data++;
		while (retry && !info->length)
			retry--;
		if (!retry && !info->length) {
			otx_ep_err("OCTEON DROQ[%d]: read_idx: %d; Retry failed !!\n",
				   droq->q_no, droq->read_idx);
			/* May be zero length packet; drop it */
			rte_pktmbuf_free(droq_pkt);
			droq->recv_buf_list[droq->read_idx] = NULL;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->stats.dropped_zlp++;
			droq->refill_count++;
			goto oq_read_fail;
		}
	}
	if (next_fetch) {
		/* Warm the cache with the next entry's info header. */
		next_idx = otx_ep_incr_index(droq->read_idx, 1, droq->nb_desc);
		droq_pkt2  = droq->recv_buf_list[next_idx];
		info2 = rte_pktmbuf_mtod(droq_pkt2, struct otx_ep_droq_info *);
		rte_prefetch_non_temporal((const void *)info2);
	}

	/* NOTE(review): unconditional byte-swap of the hardware-written
	 * length — assumes a fixed endianness mismatch between host and
	 * SDP; confirm against the SDP info-header layout.
	 */
	info->length = rte_bswap64(info->length);
	/* Deduce the actual data size */
	total_pkt_len = info->length + INFO_SIZE;
	if (total_pkt_len <= droq->buffer_size) {
		/* Single-buffer packet: strip the info header by
		 * advancing data_off and hand the mbuf up directly.
		 */
		info->length -=  OTX_EP_RH_SIZE;
		droq_pkt  = droq->recv_buf_list[droq->read_idx];
		if (likely(droq_pkt != NULL)) {
			droq_pkt->data_off += OTX_EP_DROQ_INFO_SIZE;
			/* otx_ep_dbg("OQ: pkt_len[%ld], buffer_size %d\n",
			 * (long)info->length, droq->buffer_size);
			 */
			pkt_len = (uint32_t)info->length;
			droq_pkt->pkt_len  = pkt_len;
			droq_pkt->data_len  = pkt_len;
			droq_pkt->port = otx_ep->port_id;
			droq->recv_buf_list[droq->read_idx] = NULL;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->refill_count++;
		}
	} else {
		/* Scattered packet: the frame spans several ring
		 * buffers; chain them into one multi-segment mbuf.
		 */
		struct rte_mbuf *first_buf = NULL;
		struct rte_mbuf *last_buf = NULL;

		while (pkt_len < total_pkt_len) {
			int cpy_len = 0;

			/* Last segment may be shorter than buffer_size. */
			cpy_len = ((pkt_len + droq->buffer_size) >
					total_pkt_len)
					? ((uint32_t)total_pkt_len -
						pkt_len)
					: droq->buffer_size;

			droq_pkt = droq->recv_buf_list[droq->read_idx];
			droq->recv_buf_list[droq->read_idx] = NULL;

			if (likely(droq_pkt != NULL)) {
				/* Note the first seg */
				if (!pkt_len)
					first_buf = droq_pkt;

				droq_pkt->port = otx_ep->port_id;
				if (!pkt_len) {
					/* First segment also carries the
					 * info header; strip it.
					 */
					droq_pkt->data_off +=
						OTX_EP_DROQ_INFO_SIZE;
					droq_pkt->pkt_len =
						cpy_len - OTX_EP_DROQ_INFO_SIZE;
					droq_pkt->data_len =
						cpy_len - OTX_EP_DROQ_INFO_SIZE;
				} else {
					droq_pkt->pkt_len = cpy_len;
					droq_pkt->data_len = cpy_len;
				}

				/* Head mbuf accumulates segment count
				 * and total length.
				 */
				if (pkt_len) {
					first_buf->nb_segs++;
					first_buf->pkt_len += droq_pkt->pkt_len;
				}

				if (last_buf)
					last_buf->next = droq_pkt;

				last_buf = droq_pkt;
			} else {
				otx_ep_err("no buf\n");
			}

			pkt_len += cpy_len;
			droq->read_idx = otx_ep_incr_index(droq->read_idx, 1,
							   droq->nb_desc);
			droq->refill_count++;
		}
		droq_pkt = first_buf;
	}
	/* Classify the packet and record header lengths for the app. */
	droq_pkt->packet_type = rte_net_get_ptype(droq_pkt, &hdr_lens,
					RTE_PTYPE_ALL_MASK);
	droq_pkt->l2_len = hdr_lens.l2_len;
	droq_pkt->l3_len = hdr_lens.l3_len;
	droq_pkt->l4_len = hdr_lens.l4_len;

	/* Drop oversized frames unless jumbo offload is enabled. */
	if ((droq_pkt->pkt_len > (RTE_ETHER_MAX_LEN + OTX_CUST_DATA_LEN)) &&
	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)) {
		rte_pktmbuf_free(droq_pkt);
		goto oq_read_fail;
	}

	/* Drop multi-segment frames unless scatter offload is enabled. */
	if (droq_pkt->nb_segs > 1 &&
	    !(otx_ep->rx_offloads & DEV_RX_OFFLOAD_SCATTER)) {
		rte_pktmbuf_free(droq_pkt);
		goto oq_read_fail;
	}

	return droq_pkt;

oq_read_fail:
	return NULL;
}
550
/* Poll the OQ doorbell for newly arrived packets.
 *
 * Reads the pkts_sent register and writes the same value back to
 * acknowledge it (NOTE(review): presumably the hardware decrements the
 * register by the value written, per SDP doorbell semantics — confirm
 * against the register spec). The count is accumulated into
 * pkts_pending and returned.
 */
static inline uint32_t
otx_ep_check_droq_pkts(struct otx_ep_droq *droq)
{
	volatile uint64_t pkt_count;
	uint32_t new_pkts;

	/* Latest available OQ packets */
	pkt_count = rte_read32(droq->pkts_sent_reg);
	rte_write32(pkt_count, droq->pkts_sent_reg);
	new_pkts = pkt_count;
	droq->pkts_pending += new_pkts;
	return new_pkts;
}
564
/* Check for response arrival from OCTEON TX2
 * returns number of requests completed
 *
 * Burst-RX entry point: dequeues up to @budget packets from @rx_queue
 * into @rx_pkts, then refills consumed descriptors and rings the
 * credit doorbell. Returns the number of packets written to @rx_pkts.
 */
uint16_t
otx_ep_recv_pkts(void *rx_queue,
		  struct rte_mbuf **rx_pkts,
		  uint16_t budget)
{
	struct otx_ep_droq *droq = rx_queue;
	struct otx_ep_device *otx_ep;
	struct rte_mbuf *oq_pkt;

	uint32_t pkts = 0;
	uint32_t new_pkts = 0;
	int next_fetch;

	otx_ep = droq->otx_ep_dev;

	/* Serve from the already-counted backlog first; only read the
	 * doorbell register when the backlog cannot fill the budget.
	 */
	if (droq->pkts_pending > budget) {
		new_pkts = budget;
	} else {
		new_pkts = droq->pkts_pending;
		new_pkts += otx_ep_check_droq_pkts(droq);
		if (new_pkts > budget)
			new_pkts = budget;
	}

	if (!new_pkts)
		goto update_credit; /* No pkts at this moment */

	for (pkts = 0; pkts < new_pkts; pkts++) {
		/* Push the received pkt to application */
		/* Prefetch the next entry except for the last packet. */
		next_fetch = (pkts == new_pkts - 1) ? 0 : 1;
		oq_pkt = otx_ep_droq_read_packet(otx_ep, droq, next_fetch);
		if (!oq_pkt) {
			/* Account for what was delivered so far and
			 * stop the burst.
			 */
			RTE_LOG_DP(ERR, PMD,
				   "DROQ read pkt failed pending %" PRIu64
				    "last_pkt_count %" PRIu64 "new_pkts %d.\n",
				   droq->pkts_pending, droq->last_pkt_count,
				   new_pkts);
			droq->pkts_pending -= pkts;
			droq->stats.rx_err++;
			goto finish;
		}
		rx_pkts[pkts] = oq_pkt;
		/* Stats */
		droq->stats.pkts_received++;
		droq->stats.bytes_received += oq_pkt->pkt_len;
	}
	droq->pkts_pending -= pkts;

	/* Refill DROQ buffers */
update_credit:
	if (droq->refill_count >= DROQ_REFILL_THRESHOLD) {
		int desc_refilled = otx_ep_droq_refill(droq);

		/* Flush the droq descriptor data to memory to be sure
		 * that when we update the credits the data in memory is
		 * accurate.
		 */
		rte_wmb();
		rte_write32(desc_refilled, droq->pkts_credit_reg);
	} else {
		/*
		 * SDP output goes into DROP state when output doorbell count
		 * goes below drop count. When door bell count is written with
		 * a value greater than drop count SDP output should come out
		 * of DROP state. Due to a race condition this is not happening.
		 * Writing doorbell register with 0 again may make SDP output
		 * come out of this state.
		 */

		rte_write32(0, droq->pkts_credit_reg);
	}
finish:
	return pkts;
}