drivers/net/ark/ark_ethdev_rx.c

/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2015-2018 Atomic Rules LLC
 */

#include <unistd.h>

#include "rte_pmd_ark.h"
#include "ark_ethdev_rx.h"
#include "ark_global.h"
#include "ark_logs.h"
#include "ark_mpu.h"
#include "ark_udm.h"

#define ARK_RX_META_SIZE 32
#define ARK_RX_META_OFFSET (RTE_PKTMBUF_HEADROOM - ARK_RX_META_SIZE)
#define ARK_RX_MAX_NOCHAIN (RTE_MBUF_DEFAULT_DATAROOM)

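/*
 * Layout note: the FPGA deposits a 32-byte metadata record
 * (struct ark_rx_meta) in the last ARK_RX_META_SIZE bytes of each
 * mbuf's headroom, at buf_addr + ARK_RX_META_OFFSET.  Packets whose
 * length exceeds ARK_RX_MAX_NOCHAIN are returned as chained mbufs;
 * see eth_ark_rx_jumbo() below.
 */
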
/* Forward declarations */
struct ark_rx_queue;
struct ark_rx_meta;

static void dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi);
static void ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue);
static uint32_t eth_ark_rx_jumbo(struct ark_rx_queue *queue,
                                 struct ark_rx_meta *meta,
                                 struct rte_mbuf *mbuf0,
                                 uint32_t cons_index);
static inline int eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue);

/* ************************************************************************* */
struct ark_rx_queue {
        /* array of mbufs to populate */
        struct rte_mbuf **reserve_q;
        /* array of physical (IOVA) addresses of the mbuf data buffers */
        /* This pointer itself is a virtual address */
        rte_iova_t *paddress_q;
        struct rte_mempool *mb_pool;

        struct ark_udm_t *udm;
        struct ark_mpu_t *mpu;

        uint32_t queue_size;
        uint32_t queue_mask;

        uint32_t seed_index;            /* step 1: set with empty mbuf */
        uint32_t cons_index;            /* step 3: consumed by driver */

        /* The queue Id is used to identify the HW Q */
        uint16_t phys_qid;

        /* The queue Index is used within the dpdk device structures */
        uint16_t queue_index;

        uint32_t unused;

        /* second cache line - fields only used in the slow path */
        RTE_MARKER cacheline1 __rte_cache_min_aligned;

        volatile uint32_t prod_index;   /* step 2: filled by FPGA */
} __rte_cache_aligned;

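/*
 * seed_index, prod_index, and cons_index are free-running 32-bit
 * counters that are never wrapped to the ring size.  A counter maps
 * to a ring slot via (index & queue_mask), and occupancy is computed
 * with modular subtraction (e.g. prod_index - cons_index), which is
 * why queue_size must be a power of two.
 */
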
/* ************************************************************************* */
static int
eth_ark_rx_hw_setup(struct rte_eth_dev *dev,
                    struct ark_rx_queue *queue,
                    uint16_t rx_queue_id __rte_unused, uint16_t rx_queue_idx)
{
        rte_iova_t queue_base;
        rte_iova_t phys_addr_q_base;
        rte_iova_t phys_addr_prod_index;

        queue_base = rte_malloc_virt2iova(queue);
        phys_addr_prod_index = queue_base +
                offsetof(struct ark_rx_queue, prod_index);

        phys_addr_q_base = rte_malloc_virt2iova(queue->paddress_q);

        /* Verify HW */
        if (ark_mpu_verify(queue->mpu, sizeof(rte_iova_t))) {
                ARK_PMD_LOG(ERR, "Illegal configuration rx queue\n");
                return -1;
        }

        /* Stop, reset, and configure the MPU */
        ark_mpu_configure(queue->mpu, phys_addr_q_base, queue->queue_size, 0);

        /* Give the UDM the IOVA of prod_index so the FPGA can update it */
        ark_udm_write_addr(queue->udm, phys_addr_prod_index);

        /* advance the valid pointer, but don't start until the queue starts */
        ark_mpu_reset_stats(queue->mpu);

        /* The seed is the producer index for the HW */
        ark_mpu_set_producer(queue->mpu, queue->seed_index);
        dev->data->rx_queue_state[rx_queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

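/*
 * Advance the driver's consumer index.  Once at least 64 ring slots
 * have been freed, replenish them with fresh mbufs and publish the
 * new seed_index to the MPU as the hardware's producer limit.
 */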
static inline void
eth_ark_rx_update_cons_index(struct ark_rx_queue *queue, uint32_t cons_index)
{
        queue->cons_index = cons_index;
        if ((cons_index + queue->queue_size - queue->seed_index) >= 64U) {
                eth_ark_rx_seed_mbufs(queue);
                ark_mpu_set_producer(queue->mpu, queue->seed_index);
        }
}

/* ************************************************************************* */
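/*
 * Set up an RX queue: allocate the queue structure and its two
 * parallel arrays (mbuf pointers and their IOVAs), seed the ring
 * with mbufs, and program the per-queue MPU and UDM hardware.
 */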
int
eth_ark_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
                           const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mb_pool)
{
        static int warning1;            /* = 0 */
        struct ark_adapter *ark = dev->data->dev_private;

        struct ark_rx_queue *queue;
        uint32_t i;
        int status;

        int qidx = queue_idx;

        /* We may already be set up; free memory prior to re-allocation */
        if (dev->data->rx_queues[queue_idx] != NULL) {
                eth_ark_dev_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;
        }

        if (rx_conf != NULL && warning1 == 0) {
                warning1 = 1;
                ARK_PMD_LOG(NOTICE,
                            "Arkville ignores rte_eth_rxconf argument.\n");
        }

        if (RTE_PKTMBUF_HEADROOM < ARK_RX_META_SIZE) {
                ARK_PMD_LOG(ERR,
                            "Error: DPDK Arkville requires head room > %d bytes (%s)\n",
                            ARK_RX_META_SIZE, __func__);
                return -1;              /* ERROR CODE */
        }

        if (!rte_is_power_of_2(nb_desc)) {
                ARK_PMD_LOG(ERR,
                            "DPDK Arkville configuration queue size must be power of two %u (%s)\n",
                            nb_desc, __func__);
                return -1;              /* ERROR CODE */
        }

        /* Allocate queue struct */
        queue = rte_zmalloc_socket("Ark_rxqueue",
                                   sizeof(struct ark_rx_queue),
                                   64,
                                   socket_id);
        if (queue == 0) {
                ARK_PMD_LOG(ERR, "Failed to allocate memory in %s\n", __func__);
                return -ENOMEM;
        }

        /* NOTE: zmalloc is used, so there is no need to zero indexes, etc. */
        queue->mb_pool = mb_pool;
        queue->phys_qid = qidx;
        queue->queue_index = queue_idx;
        queue->queue_size = nb_desc;
        queue->queue_mask = nb_desc - 1;

        queue->reserve_q =
                rte_zmalloc_socket("Ark_rx_queue mbuf",
                                   nb_desc * sizeof(struct rte_mbuf *),
                                   64,
                                   socket_id);
        queue->paddress_q =
                rte_zmalloc_socket("Ark_rx_queue paddr",
                                   nb_desc * sizeof(rte_iova_t),
                                   64,
                                   socket_id);

        if (queue->reserve_q == 0 || queue->paddress_q == 0) {
                ARK_PMD_LOG(ERR,
                            "Failed to allocate queue memory in %s\n",
                            __func__);
                rte_free(queue->reserve_q);
                rte_free(queue->paddress_q);
                rte_free(queue);
                return -ENOMEM;
        }

        dev->data->rx_queues[queue_idx] = queue;
        queue->udm = RTE_PTR_ADD(ark->udm.v, qidx * ARK_UDM_QOFFSET);
        queue->mpu = RTE_PTR_ADD(ark->mpurx.v, qidx * ARK_MPU_QOFFSET);

        /* populate mbuf reserve */
        status = eth_ark_rx_seed_mbufs(queue);

        if (queue->seed_index != nb_desc) {
                ARK_PMD_LOG(ERR, "Failed to allocate %u mbufs for RX queue %d\n",
                            nb_desc, qidx);
                status = -1;
        }
        /* MPU Setup */
        if (status == 0)
                status = eth_ark_rx_hw_setup(dev, queue, qidx, queue_idx);

        if (unlikely(status != 0)) {
                struct rte_mbuf **mbuf;

                ARK_PMD_LOG(ERR, "Failed to initialize RX queue %d %s\n",
                            qidx,
                            __func__);
                /* Free the mbufs allocated */
                for (i = 0, mbuf = queue->reserve_q;
                     i < queue->seed_index; ++i, mbuf++) {
                        rte_pktmbuf_free(*mbuf);
                }
                rte_free(queue->reserve_q);
                rte_free(queue->paddress_q);
                rte_free(queue);
                return -1;              /* ERROR CODE */
        }

        return 0;
}

/* ************************************************************************* */
uint16_t
eth_ark_recv_pkts_noop(void *rx_queue __rte_unused,
                       struct rte_mbuf **rx_pkts __rte_unused,
                       uint16_t nb_pkts __rte_unused)
{
        return 0;
}

/* ************************************************************************* */
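/*
 * Burst receive: walk the ring from cons_index toward the
 * FPGA-written prod_index.  For each slot, the packet length,
 * timestamp, and user data are read from the metadata record the
 * hardware placed in the mbuf headroom; oversized packets are
 * reassembled into mbuf chains by eth_ark_rx_jumbo().
 */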
uint16_t
eth_ark_recv_pkts(void *rx_queue,
                  struct rte_mbuf **rx_pkts,
                  uint16_t nb_pkts)
{
        struct ark_rx_queue *queue;
        register uint32_t cons_index, prod_index;
        uint16_t nb;
        struct rte_mbuf *mbuf;
        struct ark_rx_meta *meta;

        queue = (struct ark_rx_queue *)rx_queue;
        if (unlikely(queue == 0))
                return 0;
        if (unlikely(nb_pkts == 0))
                return 0;
        prod_index = queue->prod_index;
        cons_index = queue->cons_index;
        nb = 0;

        while (prod_index != cons_index) {
                mbuf = queue->reserve_q[cons_index & queue->queue_mask];
                /* prefetch mbuf */
                rte_mbuf_prefetch_part1(mbuf);
                rte_mbuf_prefetch_part2(mbuf);

                /* META DATA embedded in headroom */
                meta = RTE_PTR_ADD(mbuf->buf_addr, ARK_RX_META_OFFSET);

                mbuf->pkt_len = meta->pkt_len;
                mbuf->data_len = meta->pkt_len;
                /* set timestamp if enabled on at least one device */
                if (ark_timestamp_rx_dynflag > 0) {
                        *RTE_MBUF_DYNFIELD(mbuf, ark_timestamp_dynfield_offset,
                                rte_mbuf_timestamp_t *) = meta->timestamp;
                        mbuf->ol_flags |= ark_timestamp_rx_dynflag;
                }
                rte_pmd_ark_mbuf_rx_userdata_set(mbuf, meta->user_data);

                if (ARK_DEBUG_CORE) {   /* debug sanity checks */
                        if ((meta->pkt_len > (1024 * 16)) ||
                            (meta->pkt_len == 0)) {
                                ARK_PMD_LOG(DEBUG, "RX: Bad Meta Q: %u"
                                           " cons: %" PRIU32
                                           " prod: %" PRIU32
                                           " seed_index %" PRIU32
                                           "\n",
                                           queue->phys_qid,
                                           cons_index,
                                           queue->prod_index,
                                           queue->seed_index);

                                ARK_PMD_LOG(DEBUG, "       :  UDM"
                                           " prod: %" PRIU32
                                           " len: %u\n",
                                           queue->udm->rt_cfg.prod_idx,
                                           meta->pkt_len);
                                ark_mpu_dump(queue->mpu,
                                             "    ",
                                             queue->phys_qid);
                                dump_mbuf_data(mbuf, 0, 256);
                                /* it's FUBAR, so fix it up */
                                mbuf->pkt_len = 63;
                                meta->pkt_len = 63;
                        }
                }

                if (unlikely(meta->pkt_len > ARK_RX_MAX_NOCHAIN))
                        cons_index = eth_ark_rx_jumbo
                                (queue, meta, mbuf, cons_index + 1);
                else
                        cons_index += 1;

                rx_pkts[nb] = mbuf;
                nb++;
                if (nb >= nb_pkts)
                        break;
        }

        eth_ark_rx_update_cons_index(queue, cons_index);

        return nb;
}

/* ************************************************************************* */
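/*
 * Chain continuation mbufs onto mbuf0 for a packet larger than a
 * single mbuf's dataroom.  The packet's metadata is read from the
 * first mbuf only; each following ring slot supplies up to
 * RTE_MBUF_DEFAULT_DATAROOM bytes of the remaining payload.
 */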
static uint32_t
eth_ark_rx_jumbo(struct ark_rx_queue *queue,
                 struct ark_rx_meta *meta,
                 struct rte_mbuf *mbuf0,
                 uint32_t cons_index)
{
        struct rte_mbuf *mbuf_prev;
        struct rte_mbuf *mbuf;

        uint16_t remaining;
        uint16_t data_len;
        uint16_t segments;

        /* first buf was populated by the caller */
        mbuf_prev = mbuf0;
        segments = 1;
        data_len = RTE_MIN(meta->pkt_len, RTE_MBUF_DEFAULT_DATAROOM);
        remaining = meta->pkt_len - data_len;
        mbuf0->data_len = data_len;

        /* HW guarantees that the data does not exceed prod_index! */
        while (remaining != 0) {
                data_len = RTE_MIN(remaining,
                                   RTE_MBUF_DEFAULT_DATAROOM);

                remaining -= data_len;
                segments += 1;

                mbuf = queue->reserve_q[cons_index & queue->queue_mask];
                mbuf_prev->next = mbuf;
                mbuf_prev = mbuf;
                mbuf->data_len = data_len;

                cons_index += 1;
        }

        mbuf0->nb_segs = segments;
        return cons_index;
}

/* Drain the internal queue, allowing hw to clear out. */
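/* Buffers are freed, not delivered; used when releasing the queue. */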
static void
eth_ark_rx_queue_drain(struct ark_rx_queue *queue)
{
        register uint32_t cons_index;
        struct rte_mbuf *mbuf;

        cons_index = queue->cons_index;

        /* NOT performance optimized, since this is a one-shot call */
        while ((cons_index ^ queue->prod_index) & queue->queue_mask) {
                mbuf = queue->reserve_q[cons_index & queue->queue_mask];
                rte_pktmbuf_free(mbuf);
                cons_index++;
                eth_ark_rx_update_cons_index(queue, cons_index);
        }
}

uint32_t
eth_ark_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct ark_rx_queue *queue;

        queue = dev->data->rx_queues[queue_id];
        return (queue->prod_index - queue->cons_index); /* mod arith */
}

/* ************************************************************************* */
int
eth_ark_rx_start_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct ark_rx_queue *queue;

        queue = dev->data->rx_queues[queue_id];
        if (queue == 0)
                return -1;

        dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

        ark_mpu_set_producer(queue->mpu, queue->seed_index);
        ark_mpu_start(queue->mpu);

        ark_udm_queue_enable(queue->udm, 1);

        return 0;
}

/* ************************************************************************* */

/* The queue can be restarted; the data remains in place. */
int
eth_ark_rx_stop_queue(struct rte_eth_dev *dev, uint16_t queue_id)
{
        struct ark_rx_queue *queue;

        queue = dev->data->rx_queues[queue_id];
        if (queue == 0)
                return -1;

        ark_udm_queue_enable(queue->udm, 0);

        dev->data->rx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

        return 0;
}

/* ************************************************************************* */
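/*
 * Refill ring slots [seed_index, cons_index + queue_size) with fresh
 * mbufs in one bulk allocation, then record each new buffer's IOVA in
 * paddress_q for the hardware.  If the span would wrap past the end
 * of the ring, only the portion up to the wrap point is filled; the
 * remainder is picked up on the next call.
 */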
static inline int
eth_ark_rx_seed_mbufs(struct ark_rx_queue *queue)
{
        uint32_t limit = queue->cons_index + queue->queue_size;
        uint32_t seed_index = queue->seed_index;

        uint32_t count = 0;
        uint32_t seed_m = queue->seed_index & queue->queue_mask;

        uint32_t nb = limit - seed_index;

        /* Handle wrap around -- remainder is filled on the next call */
        if (unlikely(seed_m + nb > queue->queue_size))
                nb = queue->queue_size - seed_m;

        struct rte_mbuf **mbufs = &queue->reserve_q[seed_m];
        int status = rte_pktmbuf_alloc_bulk(queue->mb_pool, mbufs, nb);

        if (unlikely(status != 0)) {
                ARK_PMD_LOG(NOTICE,
                            "Could not allocate %u mbufs from pool"
                            " for RX queue %u;"
                            " %u free buffers remaining in queue\n",
                            nb, queue->queue_index,
                            queue->seed_index - queue->cons_index);
                return -1;
        }

        if (ARK_DEBUG_CORE) {           /* DEBUG */
                while (count != nb) {
                        struct rte_mbuf *mbuf_init =
                                queue->reserve_q[seed_m + count];

                        memset(mbuf_init->buf_addr, -1, 512);
                        *((uint32_t *)mbuf_init->buf_addr) =
                                seed_index + count;
                        *(uint16_t *)RTE_PTR_ADD(mbuf_init->buf_addr, 4) =
                                queue->phys_qid;
                        count++;
                }
                count = 0;
        } /* DEBUG */
        queue->seed_index += nb;

        /* Duff's device https://en.wikipedia.org/wiki/Duff's_device */
        switch (nb % 4) {
        case 0:
                while (count != nb) {
                        queue->paddress_q[seed_m++] =
                                (*mbufs++)->buf_iova;
                        count++;
                /* FALLTHROUGH */
        case 3:
                queue->paddress_q[seed_m++] =
                        (*mbufs++)->buf_iova;
                count++;
                /* FALLTHROUGH */
        case 2:
                queue->paddress_q[seed_m++] =
                        (*mbufs++)->buf_iova;
                count++;
                /* FALLTHROUGH */
        case 1:
                queue->paddress_q[seed_m++] =
                        (*mbufs++)->buf_iova;
                count++;
                /* FALLTHROUGH */

                } /* while (count != nb) */
        } /* switch */

        return 0;
}

void
eth_ark_rx_dump_queue(struct rte_eth_dev *dev, uint16_t queue_id,
                      const char *msg)
{
        struct ark_rx_queue *queue;

        queue = dev->data->rx_queues[queue_id];

        ark_ethdev_rx_dump(msg, queue);
}

/* ************************************************************************* */
/* Called on device close; not a user API.  The queue must be stopped. */
void
eth_ark_dev_rx_queue_release(void *vqueue)
{
        struct ark_rx_queue *queue;
        uint32_t i;

        queue = (struct ark_rx_queue *)vqueue;
        if (queue == 0)
                return;

        ark_udm_queue_enable(queue->udm, 0);
        /* Stop the MPU since the pointers are going away */
        ark_mpu_stop(queue->mpu);

        /* Need to clear out mbufs here, dropping packets along the way */
        eth_ark_rx_queue_drain(queue);

        for (i = 0; i < queue->queue_size; ++i)
                rte_pktmbuf_free(queue->reserve_q[i]);

        rte_free(queue->reserve_q);
        rte_free(queue->paddress_q);
        rte_free(queue);
}

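/*
 * Per-queue RX statistics are read from UDM hardware counters;
 * drops are reported both as q_errors for the queue and in the
 * device-wide imissed total.
 */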
void
eth_rx_queue_stats_get(void *vqueue, struct rte_eth_stats *stats)
{
        struct ark_rx_queue *queue;
        struct ark_udm_t *udm;

        queue = vqueue;
        if (queue == 0)
                return;
        udm = queue->udm;

        uint64_t ibytes = ark_udm_bytes(udm);
        uint64_t ipackets = ark_udm_packets(udm);
        uint64_t idropped = ark_udm_dropped(queue->udm);

        stats->q_ipackets[queue->queue_index] = ipackets;
        stats->q_ibytes[queue->queue_index] = ibytes;
        stats->q_errors[queue->queue_index] = idropped;
        stats->ipackets += ipackets;
        stats->ibytes += ibytes;
        stats->imissed += idropped;
}

void
eth_rx_queue_stats_reset(void *vqueue)
{
        struct ark_rx_queue *queue;

        queue = vqueue;
        if (queue == 0)
                return;

        ark_mpu_reset_stats(queue->mpu);
        ark_udm_queue_stats_reset(queue->udm);
}

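/*
 * Force the UDM to flush any in-flight data at close time: restart
 * each MPU with a large artificial buffer credit so pending data can
 * drain, wait briefly, then reset the UDM.
 */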
void
eth_ark_udm_force_close(struct rte_eth_dev *dev)
{
        struct ark_adapter *ark = dev->data->dev_private;
        struct ark_rx_queue *queue;
        uint32_t index;
        uint16_t i;

        if (!ark_udm_is_flushed(ark->udm.v)) {
                /* restart the MPUs */
                ARK_PMD_LOG(NOTICE, "UDM not flushed -- forcing flush\n");
                for (i = 0; i < dev->data->nb_rx_queues; i++) {
                        queue = (struct ark_rx_queue *)dev->data->rx_queues[i];
                        if (queue == 0)
                                continue;

                        ark_mpu_start(queue->mpu);
                        /* Add some buffers */
                        index = 100000 + queue->seed_index;
                        ark_mpu_set_producer(queue->mpu, index);
                }
                /* Wait to allow data to pass */
                usleep(100);

                ARK_PMD_LOG(DEBUG, "UDM forced flush attempt, stopped = %d\n",
                                ark_udm_is_flushed(ark->udm.v));
        }
        ark_udm_reset(ark->udm.v);
}

static void
ark_ethdev_rx_dump(const char *name, struct ark_rx_queue *queue)
{
        if (queue == NULL)
                return;
        ARK_PMD_LOG(DEBUG, "RX QUEUE %d -- %s", queue->phys_qid, name);
        ARK_PMD_LOG(DEBUG, ARK_SU32 ARK_SU32 ARK_SU32 ARK_SU32 "\n",
                        "queue_size", queue->queue_size,
                        "seed_index", queue->seed_index,
                        "prod_index", queue->prod_index,
                        "cons_index", queue->cons_index);

        ark_mpu_dump(queue->mpu, name, queue->phys_qid);
        ark_mpu_dump_setup(queue->mpu, queue->phys_qid);
        ark_udm_dump(queue->udm, name);
        ark_udm_dump_setup(queue->udm, queue->phys_qid);
}

/* Only used in debug.
 * This function is a raw memory dump of a portion of an mbuf's memory
 * region.  The usual function, rte_pktmbuf_dump(), only shows data
 * with respect to the data_off field.  This function shows data
 * anywhere in the mbuf's buffer.  This is useful for examining
 * data in the headroom or tailroom portion of an mbuf.
 */
static void
dump_mbuf_data(struct rte_mbuf *mbuf, uint16_t lo, uint16_t hi)
{
        uint16_t i, j;

        ARK_PMD_LOG(DEBUG, " MBUF: %p len %d, off: %d\n",
                    mbuf, mbuf->pkt_len, mbuf->data_off);
        for (i = lo; i < hi; i += 16) {
                uint8_t *dp = RTE_PTR_ADD(mbuf->buf_addr, i);

                ARK_PMD_LOG(DEBUG, "  %6d:  ", i);
                for (j = 0; j < 16; j++)
                        ARK_PMD_LOG(DEBUG, " %02x", dp[j]);

                ARK_PMD_LOG(DEBUG, "\n");
        }
}