/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of EZchip Semiconductor nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_eal.h>
#include <rte_dev.h>
#include <rte_eal_memconfig.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <arch/mpipe_xaui_def.h>
#include <arch/mpipe_gbe_def.h>

#include <gxio/mpipe.h>

#ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
#define PMD_DEBUG_RX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
#define PMD_DEBUG_TX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
#else
#define PMD_DEBUG_RX(...)
#define PMD_DEBUG_TX(...)
#endif

#define MPIPE_MAX_CHANNELS              128
#define MPIPE_TX_MAX_QUEUES             128
#define MPIPE_RX_MAX_QUEUES             16
#define MPIPE_TX_DESCS                  512
#define MPIPE_RX_BUCKETS                256
#define MPIPE_RX_STACK_SIZE             65536
#define MPIPE_RX_IP_ALIGN               2
#define MPIPE_BSM_ALIGN                 128

#define MPIPE_LINK_UPDATE_TIMEOUT       10      /*  s */
#define MPIPE_LINK_UPDATE_INTERVAL      100000  /* us */
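
/*
 * Note: mpipe_link_update() below polls the hardware link state up to
 * MPIPE_LINK_UPDATE_TIMEOUT times, sleeping MPIPE_LINK_UPDATE_INTERVAL
 * microseconds between attempts.
 */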

struct mpipe_channel_config {
        int enable;
        int first_bucket;
        int num_buckets;
        int head_room;
        gxio_mpipe_rules_stacks_t stacks;
};

struct mpipe_context {
        rte_spinlock_t        lock;
        gxio_mpipe_context_t  context;
        struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
};

static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
static int mpipe_instances;
static const char *drivername = "MPIPE PMD";

/* Per queue statistics. */
struct mpipe_queue_stats {
        uint64_t packets, bytes, errors, nomem;
};

/* Common tx/rx queue fields. */
struct mpipe_queue {
        struct mpipe_dev_priv *priv;    /* "priv" data of its device. */
        uint16_t nb_desc;               /* Number of descriptors. */
        uint16_t port_id;               /* Device index. */
        uint16_t stat_idx;              /* Queue stats index. */
        uint8_t queue_idx;              /* Queue index. */
        uint8_t link_status;            /* 0 = link down. */
        struct mpipe_queue_stats stats; /* Stat data for the queue. */
};

/* Transmit queue description. */
struct mpipe_tx_queue {
        struct mpipe_queue q;           /* Common stuff. */
};

/* Receive queue description. */
struct mpipe_rx_queue {
        struct mpipe_queue q;           /* Common stuff. */
        gxio_mpipe_iqueue_t iqueue;     /* mPIPE iqueue. */
        gxio_mpipe_idesc_t *next_desc;  /* Next idesc to process. */
        int avail_descs;                /* Number of available descs. */
        void *rx_ring_mem;              /* DMA ring memory. */
};

struct mpipe_dev_priv {
        gxio_mpipe_context_t *context;  /* mPIPE context. */
        gxio_mpipe_link_t link;         /* mPIPE link for the device. */
        gxio_mpipe_equeue_t equeue;     /* mPIPE equeue. */
        unsigned equeue_size;           /* mPIPE equeue desc count. */
        int instance;                   /* mPIPE instance. */
        int ering;                      /* mPIPE eDMA ring. */
        int stack;                      /* mPIPE buffer stack. */
        int channel;                    /* Device channel. */
        int port_id;                    /* DPDK port index. */
        struct rte_eth_dev *eth_dev;    /* DPDK device. */
        struct rte_mbuf **tx_comps;     /* TX completion array. */
        struct rte_mempool *rx_mpool;   /* mpool used by the rx queues. */
        unsigned rx_offset;             /* Receive head room. */
        unsigned rx_size_code;          /* mPIPE rx buffer size code. */
        unsigned rx_buffers;            /* receive buffers on stack. */
        int is_xaui:1,                  /* Is this an xgbe or gbe? */
            initialized:1,              /* Initialized port? */
            running:1;                  /* Running port? */
        struct ether_addr mac_addr;     /* MAC address. */
        unsigned nb_rx_queues;          /* Configured rx queues. */
        unsigned nb_tx_queues;          /* Configured tx queues. */
        int first_bucket;               /* mPIPE bucket start index. */
        int first_ring;                 /* mPIPE notif ring start index. */
        int notif_group;                /* mPIPE notif group. */
        rte_atomic32_t dp_count;        /* Active datapath thread count. */
        int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
        int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};

#define mpipe_priv(dev)                 \
        ((struct mpipe_dev_priv*)(dev)->data->dev_private)

#define mpipe_name(priv)                \
        ((priv)->eth_dev->data->name)

#define mpipe_rx_queue(priv, n)         \
        ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])

#define mpipe_tx_queue(priv, n)         \
        ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])

static void
mpipe_xmit_flush(struct mpipe_dev_priv *priv);

static void
mpipe_recv_flush(struct mpipe_dev_priv *priv);

static int mpipe_equeue_sizes[] = {
        [GXIO_MPIPE_EQUEUE_ENTRY_512]   = 512,
        [GXIO_MPIPE_EQUEUE_ENTRY_2K]    = 2048,
        [GXIO_MPIPE_EQUEUE_ENTRY_8K]    = 8192,
        [GXIO_MPIPE_EQUEUE_ENTRY_64K]   = 65536,
};

static int mpipe_iqueue_sizes[] = {
        [GXIO_MPIPE_IQUEUE_ENTRY_128]   = 128,
        [GXIO_MPIPE_IQUEUE_ENTRY_512]   = 512,
        [GXIO_MPIPE_IQUEUE_ENTRY_2K]    = 2048,
        [GXIO_MPIPE_IQUEUE_ENTRY_64K]   = 65536,
};

static int mpipe_buffer_sizes[] = {
        [GXIO_MPIPE_BUFFER_SIZE_128]    = 128,
        [GXIO_MPIPE_BUFFER_SIZE_256]    = 256,
        [GXIO_MPIPE_BUFFER_SIZE_512]    = 512,
        [GXIO_MPIPE_BUFFER_SIZE_1024]   = 1024,
        [GXIO_MPIPE_BUFFER_SIZE_1664]   = 1664,
        [GXIO_MPIPE_BUFFER_SIZE_4096]   = 4096,
        [GXIO_MPIPE_BUFFER_SIZE_10368]  = 10368,
        [GXIO_MPIPE_BUFFER_SIZE_16384]  = 16384,
};

static gxio_mpipe_context_t *
mpipe_context(int instance)
{
        if (instance < 0 || instance >= mpipe_instances)
                return NULL;
        return &mpipe_contexts[instance].context;
}

static int mpipe_channel_config(int instance, int channel,
                                struct mpipe_channel_config *config)
{
        struct mpipe_channel_config *data;
        struct mpipe_context *context;
        gxio_mpipe_rules_t rules;
        int idx, rc = 0;

        if (instance < 0 || instance >= mpipe_instances ||
            channel < 0 || channel >= MPIPE_MAX_CHANNELS)
                return -EINVAL;

        context = &mpipe_contexts[instance];

        rte_spinlock_lock(&context->lock);

        gxio_mpipe_rules_init(&rules, &context->context);

        for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
                data = (channel == idx) ? config : &context->channels[idx];

                if (!data->enable)
                        continue;

                rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
                                            data->num_buckets, &data->stacks);
                if (rc < 0) {
                        goto done;
                }

                rc = gxio_mpipe_rules_add_channel(&rules, idx);
                if (rc < 0) {
                        goto done;
                }

                rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
                if (rc < 0) {
                        goto done;
                }
        }

        rc = gxio_mpipe_rules_commit(&rules);
        if (rc == 0) {
                memcpy(&context->channels[channel], config, sizeof(*config));
        }

done:
        rte_spinlock_unlock(&context->lock);

        return rc;
}

static int
mpipe_get_size_index(int *array, int count, int size,
                     bool roundup)
{
        int i, last = -1;

        for (i = 0; i < count && array[i] < size; i++) {
                if (array[i])
                        last = i;
        }

        if (roundup)
                return i < count ? (int)i : -ENOENT;
        else
                return last >= 0 ? last : -ENOENT;
}

static int
mpipe_calc_size(int *array, int count, int size)
{
        int index = mpipe_get_size_index(array, count, size, 1);
        return index < 0 ? index : array[index];
}

static int mpipe_equeue_size(int size)
{
        int result;
        result = mpipe_calc_size(mpipe_equeue_sizes,
                                 RTE_DIM(mpipe_equeue_sizes), size);
        return result;
}

static int mpipe_iqueue_size(int size)
{
        int result;
        result = mpipe_calc_size(mpipe_iqueue_sizes,
                                 RTE_DIM(mpipe_iqueue_sizes), size);
        return result;
}

static int mpipe_buffer_size_index(int size)
{
        int result;
        result = mpipe_get_size_index(mpipe_buffer_sizes,
                                      RTE_DIM(mpipe_buffer_sizes), size, 0);
        return result;
}
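
/*
 * Example (illustrative): the helpers above map a requested size onto the
 * discrete sizes the mPIPE hardware supports.  mpipe_equeue_size() and
 * mpipe_iqueue_size() round up, e.g. a request for 600 egress descriptors
 * yields 2048, while mpipe_buffer_size_index() rounds down, so a 1500 byte
 * buffer maps to the GXIO_MPIPE_BUFFER_SIZE_1024 size code.
 */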

static inline int
mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                  struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                   struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}
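
/*
 * The two helpers above follow the common DPDK PMD idiom of treating
 * struct rte_eth_link as a single 64-bit word: rte_atomic64_cmpset()
 * copies the whole link record in one atomic operation, so readers never
 * observe a half-updated speed/duplex/status tuple.
 */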

static void
mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
                struct rte_eth_dev_info *dev_info)
{
        dev_info->min_rx_bufsize  = 128;
        dev_info->max_rx_pktlen   = 1518;
        dev_info->max_tx_queues   = MPIPE_TX_MAX_QUEUES;
        dev_info->max_rx_queues   = MPIPE_RX_MAX_QUEUES;
        dev_info->max_mac_addrs   = 1;
        dev_info->rx_offload_capa = 0;
        dev_info->tx_offload_capa = 0;
}

static int
mpipe_configure(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);

        if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
                RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
                        mpipe_name(priv), dev->data->nb_tx_queues,
                        MPIPE_TX_MAX_QUEUES);
                return -EINVAL;
        }
        priv->nb_tx_queues = dev->data->nb_tx_queues;

        if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
                RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
                        mpipe_name(priv), dev->data->nb_rx_queues,
                        MPIPE_RX_MAX_QUEUES);
                return -EINVAL;
        }
        priv->nb_rx_queues = dev->data->nb_rx_queues;

        return 0;
}

static inline int
mpipe_link_compare(struct rte_eth_link *link1,
                   struct rte_eth_link *link2)
{
        return (*(uint64_t *)link1 == *(uint64_t *)link2)
                ? -1 : 0;
}

static int
mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct rte_eth_link old, new;
        int64_t state, speed;
        int count, rc;

        memset(&old, 0, sizeof(old));
        memset(&new, 0, sizeof(new));
        mpipe_dev_atomic_read_link_status(dev, &old);

        for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
                if (!priv->initialized)
                        break;

                state = gxio_mpipe_link_get_attr(&priv->link,
                                                 GXIO_MPIPE_LINK_CURRENT_STATE);
                if (state < 0)
                        break;

                speed = state & GXIO_MPIPE_LINK_SPEED_MASK;

                if (speed == GXIO_MPIPE_LINK_1G) {
                        new.link_speed = ETH_LINK_SPEED_1000;
                        new.link_duplex = ETH_LINK_FULL_DUPLEX;
                        new.link_status = 1;
                } else if (speed == GXIO_MPIPE_LINK_10G) {
                        new.link_speed = ETH_LINK_SPEED_10000;
                        new.link_duplex = ETH_LINK_FULL_DUPLEX;
                        new.link_status = 1;
                }

                rc = mpipe_link_compare(&old, &new);
                if (rc == 0 || !wait_to_complete)
                        break;

                rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
        }

        mpipe_dev_atomic_write_link_status(dev, &new);
        return rc;
}

static int
mpipe_set_link(struct rte_eth_dev *dev, int up)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int rc;

        rc = gxio_mpipe_link_set_attr(&priv->link,
                                      GXIO_MPIPE_LINK_DESIRED_STATE,
                                      up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
                        mpipe_name(priv), up ? "up" : "down");
        } else {
                mpipe_link_update(dev, 0);
        }

        return rc;
}

static int
mpipe_set_link_up(struct rte_eth_dev *dev)
{
        return mpipe_set_link(dev, 1);
}

static int
mpipe_set_link_down(struct rte_eth_dev *dev)
{
        return mpipe_set_link(dev, 0);
}

static inline void
mpipe_dp_enter(struct mpipe_dev_priv *priv)
{
        __insn_mtspr(SPR_DSTREAM_PF, 0);
        rte_atomic32_inc(&priv->dp_count);
}

static inline void
mpipe_dp_exit(struct mpipe_dev_priv *priv)
{
        rte_atomic32_dec(&priv->dp_count);
}

static inline void
mpipe_dp_wait(struct mpipe_dev_priv *priv)
{
        while (rte_atomic32_read(&priv->dp_count) != 0) {
                rte_pause();
        }
}
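
/*
 * mpipe_dp_enter()/mpipe_dp_exit() bracket every rx/tx burst, and
 * mpipe_dp_wait() spins until no burst is in flight.  Together they form
 * a simple quiescence protocol: mpipe_stop() clears link_status on all
 * queues and then waits here, so straggling datapath threads drain out
 * before the classifier rules and buffer stacks are torn down.
 */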

static inline struct rte_mbuf *
mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
                int in_port)
{
        void *va = gxio_mpipe_idesc_get_va(idesc);
        uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
        struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);

        rte_pktmbuf_reset(mbuf);
        mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
        mbuf->port     = in_port;
        mbuf->data_len = size;
        mbuf->pkt_len  = size;
        mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);

        PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
                     mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);

        return mbuf;
}

static inline void
mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
{
        const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
        void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);

        gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
        PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
                     mpipe_name(priv), mbuf, buf_addr, priv->stack);
}
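
/*
 * Buffer layout (sketch): each mempool element is handed to the hardware
 * at buf_addr + RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN, so a received
 * frame's payload lands IP-aligned inside the mbuf's data area.  Since
 * priv->rx_offset is computed in mpipe_rx_queue_setup() as the distance
 * from the struct rte_mbuf header to that same point, mpipe_recv_mbuf()
 * can recover the mbuf with a single pointer subtraction from the VA the
 * mPIPE returns in the ingress descriptor.
 */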

static inline void
mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
{
        struct rte_mbuf *mbuf;
        int i;

        for (i = 0; i < count; i++) {
                mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
                if (!mbuf)
                        break;
                mpipe_recv_push(priv, mbuf);
        }

        priv->rx_buffers += i;  /* Count only the buffers actually pushed. */
        PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
}

static inline void
mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
{
        const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
        uint8_t in_port = priv->port_id;
        struct rte_mbuf *mbuf;
        unsigned count;
        void *va;

        for (count = 0; count < priv->rx_buffers; count++) {
                va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
                if (!va)
                        break;
                mbuf = RTE_PTR_SUB(va, offset);

                PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
                             mpipe_name(priv), mbuf, va);

                mbuf->data_off    = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
                mbuf->refcnt      = 1;
                mbuf->nb_segs     = 1;
                mbuf->port        = in_port;
                mbuf->packet_type = 0;
                mbuf->data_len    = 0;
                mbuf->pkt_len     = 0;

                __rte_mbuf_raw_free(mbuf);
        }

        PMD_DEBUG_RX("%s: Returned %d/%d buffers\n",
                     mpipe_name(priv), count, priv->rx_buffers);
        priv->rx_buffers -= count;
}

static void
mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
{
        size_t size = ms->hugepage_sz;
        uint8_t *addr, *end;
        int rc = 0;

        for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
                rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
                                              size, 0);
                if (rc < 0)
                        break;
        }

        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
                        mpipe_name(priv), ms->addr, rc);
        } else {
                RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
                        mpipe_name(priv), ms->addr,
                        RTE_PTR_ADD(ms->addr, ms->len - 1));
        }
}
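
/*
 * The mPIPE engine resolves the virtual addresses it is handed (pushed
 * buffers, egress descriptors) through its own IO TLB, so every hugepage
 * that may back packet data has to be registered up front; that is why
 * mpipe_recv_init() below walks the whole EAL physmem layout rather than
 * just the pages backing the rx mempool.
 */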

static int
mpipe_recv_init(struct mpipe_dev_priv *priv)
{
        const struct rte_memseg *seg = rte_eal_get_physmem_layout();
        size_t stack_size;
        void *stack_mem;
        int rc;

        if (!priv->rx_mpool) {
                RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
                        mpipe_name(priv));
                return -ENODEV;
        }

        /* Allocate one NotifRing for each queue. */
        rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
                                          0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->first_ring = rc;

        /* Allocate a NotifGroup. */
        rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->notif_group = rc;

        /* Allocate required buckets. */
        rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->first_bucket = rc;

        rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->stack = rc;

        while (seg && seg->addr)
                mpipe_register_segment(priv, seg++);

        stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
        stack_mem = rte_zmalloc(NULL, stack_size, 65536);
        if (!stack_mem) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        } else {
                RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
                        mpipe_name(priv), stack_mem,
                        RTE_PTR_ADD(stack_mem, stack_size - 1));
        }

        rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
                                          priv->rx_size_code, stack_mem,
                                          stack_size, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
                        mpipe_name(priv));
                return rc;
        }

        return 0;
}

static int
mpipe_xmit_init(struct mpipe_dev_priv *priv)
{
        size_t ring_size;
        void *ring_mem;
        int rc;

        /* Allocate eDMA ring. */
        rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->ering = rc;

        rc = mpipe_equeue_size(MPIPE_TX_DESCS);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
                        mpipe_name(priv), (int)MPIPE_TX_DESCS);
                return -ENOMEM;
        }
        priv->equeue_size = rc;

        /* Initialize completion array. */
        ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
        priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
        if (!priv->tx_comps) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        /* Allocate eDMA ring memory. */
        ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
        ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
        if (!ring_mem) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        } else {
                RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
                        mpipe_name(priv), ring_mem,
                        RTE_PTR_ADD(ring_mem, ring_size - 1));
        }

        /* Initialize eDMA ring. */
        rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
                                    priv->channel, ring_mem, ring_size, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
                        mpipe_name(priv));
                return rc;
        }

        return 0;
}

static int
mpipe_link_init(struct mpipe_dev_priv *priv)
{
        int rc;

        /* Open the link. */
        rc = gxio_mpipe_link_open(&priv->link, priv->context,
                                  mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Get the channel index. */
        rc = gxio_mpipe_link_channel(&priv->link);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Bad channel\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->channel = rc;

        return 0;
}

static int
mpipe_init(struct mpipe_dev_priv *priv)
{
        int rc;

        if (priv->initialized)
                return 0;

        rc = mpipe_link_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
                        mpipe_name(priv));
                return rc;
        }

        rc = mpipe_recv_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
                        mpipe_name(priv));
                return rc;
        }

        rc = mpipe_xmit_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
                        mpipe_name(priv));
                return rc;
        }

        priv->initialized = 1;

        return 0;
}

static int
mpipe_start(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_channel_config config;
        struct mpipe_rx_queue *rx_queue;
        struct rte_eth_link eth_link;
        unsigned queue, buffers = 0;
        size_t ring_size;
        void *ring_mem;
        int rc;

        memset(&eth_link, 0, sizeof(eth_link));
        mpipe_dev_atomic_write_link_status(dev, &eth_link);

        rc = mpipe_init(priv);
        if (rc < 0)
                return rc;

        /* Initialize NotifRings. */
        for (queue = 0; queue < priv->nb_rx_queues; queue++) {
                rx_queue = mpipe_rx_queue(priv, queue);
                ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);

                ring_mem = rte_malloc(NULL, ring_size, ring_size);
                if (!ring_mem) {
                        RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
                                mpipe_name(priv));
                        return -ENOMEM;
                } else {
                        RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
                                mpipe_name(priv), queue, ring_mem,
                                RTE_PTR_ADD(ring_mem, ring_size - 1));
                }

                rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
                                            priv->first_ring + queue, ring_mem,
                                            ring_size, 0);
                if (rc < 0) {
                        RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
                                mpipe_name(priv));
                        return rc;
                }

                rx_queue->rx_ring_mem = ring_mem;
                buffers += rx_queue->q.nb_desc;
        }

        /* Initialize ingress NotifGroup and buckets. */
        rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
                        priv->notif_group, priv->first_ring, priv->nb_rx_queues,
                        priv->first_bucket, MPIPE_RX_BUCKETS,
                        GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Configure the classifier to deliver packets from this port. */
        config.enable = 1;
        config.first_bucket = priv->first_bucket;
        config.num_buckets = MPIPE_RX_BUCKETS;
        memset(&config.stacks, 0xff, sizeof(config.stacks));
        config.stacks.stacks[priv->rx_size_code] = priv->stack;
        config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;

        rc = mpipe_channel_config(priv->instance, priv->channel,
                                  &config);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Fill empty buffers into the buffer stack. */
        mpipe_recv_fill_stack(priv, buffers);

        /* Bring up the link. */
        mpipe_set_link_up(dev);

        /* Start xmit/recv on queues. */
        for (queue = 0; queue < priv->nb_tx_queues; queue++)
                mpipe_tx_queue(priv, queue)->q.link_status = 1;
        for (queue = 0; queue < priv->nb_rx_queues; queue++)
                mpipe_rx_queue(priv, queue)->q.link_status = 1;
        priv->running = 1;

        return 0;
}

static void
mpipe_stop(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_channel_config config;
        unsigned queue;
        int rc;

        for (queue = 0; queue < priv->nb_tx_queues; queue++)
                mpipe_tx_queue(priv, queue)->q.link_status = 0;
        for (queue = 0; queue < priv->nb_rx_queues; queue++)
                mpipe_rx_queue(priv, queue)->q.link_status = 0;

        /* Make sure the link_status writes land. */
        rte_wmb();

        /*
         * Wait for link_status change to register with straggling datapath
         * threads.
         */
        mpipe_dp_wait(priv);

        /* Bring down the link. */
        mpipe_set_link_down(dev);

        /* Remove classifier rules. */
        memset(&config, 0, sizeof(config));
        rc = mpipe_channel_config(priv->instance, priv->channel,
                                  &config);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
                        mpipe_name(priv));
        }

        /* Flush completed xmit packets. */
        mpipe_xmit_flush(priv);

        /* Flush buffer stacks. */
        mpipe_recv_flush(priv);

        priv->running = 0;
}

static void
mpipe_close(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        if (priv->running)
                mpipe_stop(dev);
}

static void
mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_tx_queue *tx_queue;
        struct mpipe_rx_queue *rx_queue;
        unsigned i;
        uint16_t idx;

        memset(stats, 0, sizeof(*stats));

        for (i = 0; i < priv->nb_tx_queues; i++) {
                tx_queue = mpipe_tx_queue(priv, i);

                stats->opackets += tx_queue->q.stats.packets;
                stats->obytes   += tx_queue->q.stats.bytes;
                stats->oerrors  += tx_queue->q.stats.errors;

                idx = tx_queue->q.stat_idx;
                if (idx != (uint16_t)-1) {
                        stats->q_opackets[idx] += tx_queue->q.stats.packets;
                        stats->q_obytes[idx]   += tx_queue->q.stats.bytes;
                        stats->q_errors[idx]   += tx_queue->q.stats.errors;
                }
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                rx_queue = mpipe_rx_queue(priv, i);

                stats->ipackets  += rx_queue->q.stats.packets;
                stats->ibytes    += rx_queue->q.stats.bytes;
                stats->ierrors   += rx_queue->q.stats.errors;
                stats->rx_nombuf += rx_queue->q.stats.nomem;

                idx = rx_queue->q.stat_idx;
                if (idx != (uint16_t)-1) {
                        stats->q_ipackets[idx] += rx_queue->q.stats.packets;
                        stats->q_ibytes[idx]   += rx_queue->q.stats.bytes;
                        stats->q_errors[idx]   += rx_queue->q.stats.errors;
                }
        }
}

static void
mpipe_stats_reset(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_tx_queue *tx_queue;
        struct mpipe_rx_queue *rx_queue;
        unsigned i;

        for (i = 0; i < priv->nb_tx_queues; i++) {
                tx_queue = mpipe_tx_queue(priv, i);
                memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                rx_queue = mpipe_rx_queue(priv, i);
                memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
        }
}

static int
mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
                              uint8_t stat_idx, uint8_t is_rx)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);

        if (is_rx) {
                priv->rx_stat_mapping[stat_idx] = queue_id;
        } else {
                priv->tx_stat_mapping[stat_idx] = queue_id;
        }

        return 0;
}

static int
mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                     uint16_t nb_desc, unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        uint16_t idx;

        tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
                               RTE_CACHE_LINE_SIZE);
        if (!tx_queue) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        memset(&tx_queue->q, 0, sizeof(tx_queue->q));
        tx_queue->q.priv = priv;
        tx_queue->q.queue_idx = queue_idx;
        tx_queue->q.port_id = dev->data->port_id;
        tx_queue->q.nb_desc = nb_desc;

        tx_queue->q.stat_idx = -1;
        for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
                if (priv->tx_stat_mapping[idx] == queue_idx)
                        tx_queue->q.stat_idx = idx;
        }

        dev->data->tx_queues[queue_idx] = tx_queue;

        return 0;
}

static void
mpipe_tx_queue_release(void *_txq)
{
        rte_free(_txq);
}

static int
mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                     uint16_t nb_desc, unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mp)
{
        struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        uint16_t idx;
        int size, rc;

        rc = mpipe_iqueue_size(nb_desc);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
                        mpipe_name(priv), (int)nb_desc);
                return -ENOMEM;
        }

        if (rc != nb_desc) {
                RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
                        mpipe_name(priv), (int)nb_desc, rc);
                nb_desc = rc;
        }

        size = sizeof(*rx_queue);
        rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
        if (!rx_queue) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        memset(&rx_queue->q, 0, sizeof(rx_queue->q));
        rx_queue->q.priv = priv;
        rx_queue->q.nb_desc = nb_desc;
        rx_queue->q.port_id = dev->data->port_id;
        rx_queue->q.queue_idx = queue_idx;

        if (!priv->rx_mpool) {
                int buf_size = (rte_pktmbuf_data_room_size(mp) -
                                RTE_PKTMBUF_HEADROOM -
                                MPIPE_RX_IP_ALIGN);

                priv->rx_offset = (sizeof(struct rte_mbuf) +
                                   rte_pktmbuf_priv_size(mp) +
                                   RTE_PKTMBUF_HEADROOM +
                                   MPIPE_RX_IP_ALIGN);
                if (buf_size < 0) {
                        RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
                                mpipe_name(priv),
                                rte_pktmbuf_data_room_size(mp));
                        return -ENOMEM;
                }

                priv->rx_size_code = mpipe_buffer_size_index(buf_size);
                priv->rx_mpool = mp;
        }

        if (priv->rx_mpool != mp) {
                RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
                        mpipe_name(priv));
        }

        rx_queue->q.stat_idx = -1;
        for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
                if (priv->rx_stat_mapping[idx] == queue_idx)
                        rx_queue->q.stat_idx = idx;
        }

        dev->data->rx_queues[queue_idx] = rx_queue;

        return 0;
}

static void
mpipe_rx_queue_release(void *_rxq)
{
        rte_free(_rxq);
}

#define MPIPE_XGBE_ENA_HASH_MULTI       \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
#define MPIPE_XGBE_ENA_HASH_UNI         \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
#define MPIPE_XGBE_COPY_ALL             \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
#define MPIPE_GBE_ENA_MULTI_HASH        \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
#define MPIPE_GBE_ENA_UNI_HASH          \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
#define MPIPE_GBE_COPY_ALL              \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)

static void
mpipe_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int64_t reg;
        int addr;

        if (priv->is_xaui) {
                addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
                reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
                reg |=  MPIPE_XGBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        } else {
                addr = MPIPE_GBE_NETWORK_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
                reg &= ~MPIPE_GBE_ENA_UNI_HASH;
                reg |=  MPIPE_GBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        }
}

static void
mpipe_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int64_t reg;
        int addr;

        if (priv->is_xaui) {
                addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg |=  MPIPE_XGBE_ENA_HASH_MULTI;
                reg |=  MPIPE_XGBE_ENA_HASH_UNI;
                reg &= ~MPIPE_XGBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        } else {
                addr = MPIPE_GBE_NETWORK_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg |=  MPIPE_GBE_ENA_MULTI_HASH;
                reg |=  MPIPE_GBE_ENA_UNI_HASH;
                reg &= ~MPIPE_GBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        }
}

static struct eth_dev_ops mpipe_dev_ops = {
        .dev_infos_get           = mpipe_infos_get,
        .dev_configure           = mpipe_configure,
        .dev_start               = mpipe_start,
        .dev_stop                = mpipe_stop,
        .dev_close               = mpipe_close,
        .stats_get               = mpipe_stats_get,
        .stats_reset             = mpipe_stats_reset,
        .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
        .tx_queue_setup          = mpipe_tx_queue_setup,
        .rx_queue_setup          = mpipe_rx_queue_setup,
        .tx_queue_release        = mpipe_tx_queue_release,
        .rx_queue_release        = mpipe_rx_queue_release,
        .link_update             = mpipe_link_update,
        .dev_set_link_up         = mpipe_set_link_up,
        .dev_set_link_down       = mpipe_set_link_down,
        .promiscuous_enable      = mpipe_promiscuous_enable,
        .promiscuous_disable     = mpipe_promiscuous_disable,
};

static inline void
mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
{
        gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        int64_t slot;

        for (slot = start; slot < end; slot++) {
                gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
        }
}
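
/*
 * A "null" descriptor has the no-send (ns) bit set, so the hardware
 * retires the slot without transmitting anything.  mpipe_do_xmit() uses
 * this to pad out reserved equeue slots it could not fill, and
 * mpipe_xmit_flush() uses a single null descriptor as a completion
 * barrier.
 */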

static void
mpipe_xmit_flush(struct mpipe_dev_priv *priv)
{
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        int64_t slot;

        /* Post a dummy descriptor and wait for its return. */
        slot = gxio_mpipe_equeue_reserve(equeue, 1);
        if (slot < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
                        mpipe_name(priv));
                return;
        }

        mpipe_xmit_null(priv, slot, slot + 1);

        while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
                rte_pause();
        }

        for (slot = 0; slot < priv->equeue_size; slot++) {
                if (priv->tx_comps[slot])
                        rte_pktmbuf_free_seg(priv->tx_comps[slot]);
        }
}

static void
mpipe_recv_flush(struct mpipe_dev_priv *priv)
{
        uint8_t in_port = priv->port_id;
        struct mpipe_rx_queue *rx_queue;
        gxio_mpipe_iqueue_t *iqueue;
        gxio_mpipe_idesc_t idesc;
        struct rte_mbuf *mbuf;
        int retries = 0;
        unsigned queue;

        do {
                mpipe_recv_flush_stack(priv);

                /* Flush packets sitting in recv queues. */
                for (queue = 0; queue < priv->nb_rx_queues; queue++) {
                        rx_queue = mpipe_rx_queue(priv, queue);
                        iqueue = &rx_queue->iqueue;
                        while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
                                mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
                                rte_pktmbuf_free(mbuf);
                                priv->rx_buffers--;
                        }
                        rte_free(rx_queue->rx_ring_mem);
                }
        } while (retries++ < 10 && priv->rx_buffers);

        if (priv->rx_buffers) {
                RTE_LOG(ERR, PMD, "%s: Leaked %d receive buffers.\n",
                        mpipe_name(priv), priv->rx_buffers);
        } else {
                PMD_DEBUG_RX("%s: Returned all receive buffers.\n",
                             mpipe_name(priv));
        }
}

static inline uint16_t
mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
              uint16_t nb_pkts)
{
        struct mpipe_dev_priv *priv = tx_queue->q.priv;
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        unsigned nb_bytes = 0;
        unsigned nb_sent = 0;
        int nb_slots, i;

        PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
                     nb_pkts, mpipe_name(tx_queue->q.priv),
                     tx_queue->q.queue_idx);

        /* Optimistic assumption that we need exactly one slot per packet. */
        nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);

        do {
                struct rte_mbuf *mbuf = NULL, *pkt = NULL;
                int64_t slot;

                /* Reserve eDMA ring slots. */
                slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
                if (unlikely(slot < 0)) {
                        break;
                }

                for (i = 0; i < nb_slots; i++) {
                        unsigned idx = (slot + i) & (priv->equeue_size - 1);
                        rte_prefetch0(priv->tx_comps[idx]);
                }

                /* Fill up slots with descriptor and completion info. */
                for (i = 0; i < nb_slots; i++) {
                        unsigned idx = (slot + i) & (priv->equeue_size - 1);
                        gxio_mpipe_edesc_t desc;
                        struct rte_mbuf *next;

                        /* Starting on a new packet? */
                        if (likely(!mbuf)) {
                                int room = nb_slots - i;

                                pkt = mbuf = tx_pkts[nb_sent];

                                /* Bail out if we run out of descs. */
                                if (unlikely(pkt->nb_segs > room))
                                        break;

                                nb_sent++;
                        }

                        /* We have a segment to send. */
                        next = mbuf->next;

                        if (priv->tx_comps[idx])
                                rte_pktmbuf_free_seg(priv->tx_comps[idx]);

                        desc = (gxio_mpipe_edesc_t) { {
                                .va        = rte_pktmbuf_mtod(mbuf, uintptr_t),
                                .xfer_size = rte_pktmbuf_data_len(mbuf),
                                .bound     = next ? 0 : 1,
                        } };

                        nb_bytes += mbuf->data_len;
                        priv->tx_comps[idx] = mbuf;
                        gxio_mpipe_equeue_put_at(equeue, desc, slot + i);

                        PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
                                     mpipe_name(priv),
                                     tx_queue->q.queue_idx,
                                     rte_pktmbuf_mtod(mbuf, void *),
                                     rte_pktmbuf_data_len(mbuf));

                        mbuf = next;
                }

                if (unlikely(nb_sent < nb_pkts)) {

                        /* Fill remaining slots with null descriptors. */
                        mpipe_xmit_null(priv, slot + i, slot + nb_slots);

                        /*
                         * Calculate exact number of descriptors needed for
                         * the next go around.
                         */
                        nb_slots = 0;
                        for (i = nb_sent; i < nb_pkts; i++) {
                                nb_slots += tx_pkts[i]->nb_segs;
                        }

                        nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
                }
        } while (nb_sent < nb_pkts);

        tx_queue->q.stats.packets += nb_sent;
        tx_queue->q.stats.bytes   += nb_bytes;

        return nb_sent;
}
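
/*
 * Completion handling above is deferred: instead of polling for egress
 * completions, each transmitted segment is remembered in tx_comps[slot],
 * and the mbuf is freed the next time that equeue slot is reused, by which
 * point the hardware has finished with it.  The prefetch pass over
 * tx_comps warms the cache before the fill loop touches the same entries.
 */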

static inline uint16_t
mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
              uint16_t nb_pkts)
{
        struct mpipe_dev_priv *priv = rx_queue->q.priv;
        gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
        gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
        uint8_t in_port = rx_queue->q.port_id;
        const unsigned look_ahead = 8;
        int room = nb_pkts, rc = 0;
        unsigned nb_packets = 0;
        unsigned nb_dropped = 0;
        unsigned nb_nomem = 0;
        unsigned nb_bytes = 0;
        unsigned nb_descs, i;

        while (room && !rc) {
                if (rx_queue->avail_descs < room) {
                        rc = gxio_mpipe_iqueue_try_peek(iqueue,
                                                        &rx_queue->next_desc);
                        rx_queue->avail_descs = rc < 0 ? 0 : rc;
                }

                if (unlikely(!rx_queue->avail_descs)) {
                        break;
                }

                nb_descs = RTE_MIN(room, rx_queue->avail_descs);

                first_idesc = rx_queue->next_desc;
                last_idesc  = first_idesc + nb_descs;

                rx_queue->next_desc   += nb_descs;
                rx_queue->avail_descs -= nb_descs;

                for (i = 1; i < look_ahead; i++) {
                        rte_prefetch0(first_idesc + i);
                }

                PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
                             mpipe_name(rx_queue->q.priv),
                             rx_queue->q.queue_idx,
                             nb_descs);

                for (idesc = first_idesc; idesc < last_idesc; idesc++) {
                        struct rte_mbuf *mbuf;

                        PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
                                     mpipe_name(priv),
                                     rx_queue->q.queue_idx,
                                     nb_packets, nb_descs);

                        rte_prefetch0(idesc + look_ahead);

                        PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
                                     "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
                                     mpipe_name(priv),
                                     rx_queue->q.queue_idx,
                                     idesc,
                                     idesc->me ? "me, " : "",
                                     idesc->tr ? "tr, " : "",
                                     idesc->ce ? "ce, " : "",
                                     idesc->ct ? "ct, " : "",
                                     idesc->cs ? "cs, " : "",
                                     idesc->nr ? "nr, " : "",
                                     idesc->sq ? "sq, " : "",
                                     idesc->ts ? "ts, " : "",
                                     idesc->ps ? "ps, " : "",
                                     idesc->be ? "be, " : "",
                                     idesc->l2_size,
                                     idesc->bucket_id,
                                     idesc->channel,
                                     idesc->notif_ring,
                                     (unsigned long)idesc->packet_sqn,
                                     (unsigned long)idesc->va);

                        if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
                                nb_dropped++;
                                gxio_mpipe_iqueue_drop(iqueue, idesc);
                                PMD_DEBUG_RX("%s:%d: Descriptor error\n",
                                             mpipe_name(rx_queue->q.priv),
                                             rx_queue->q.queue_idx);
                                continue;
                        }

                        mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool);
                        if (unlikely(!mbuf)) {
                                nb_nomem++;
                                gxio_mpipe_iqueue_drop(iqueue, idesc);
                                PMD_DEBUG_RX("%s:%d: RX alloc failure\n",
                                             mpipe_name(rx_queue->q.priv),
                                             rx_queue->q.queue_idx);
                                continue;
                        }

                        mpipe_recv_push(priv, mbuf);

                        /* Get and setup the mbuf for the received packet. */
                        mbuf = mpipe_recv_mbuf(priv, idesc, in_port);

                        /* Update results and statistics counters. */
                        rx_pkts[nb_packets] = mbuf;
                        nb_bytes += mbuf->pkt_len;
                        nb_packets++;
                }

                /*
                 * We release the ring in bursts, but do not track and release
                 * buckets.  This therefore breaks dynamic flow affinity, but
                 * we always operate in static affinity mode, and so we're OK
                 * with this optimization.
                 */
                gxio_mpipe_iqueue_advance(iqueue, nb_descs);
                gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);

                /*
                 * Go around once more if we haven't yet peeked the queue, and
                 * if we have more room to receive.
                 */
                room = nb_pkts - nb_packets;
        }

        rx_queue->q.stats.packets += nb_packets;
        rx_queue->q.stats.bytes   += nb_bytes;
        rx_queue->q.stats.errors  += nb_dropped;
        rx_queue->q.stats.nomem   += nb_nomem;

        PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
                     mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
                     nb_packets, nb_bytes, nb_dropped, nb_nomem);

        return nb_packets;
}

static uint16_t
mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct mpipe_rx_queue *rx_queue = _rxq;
        uint16_t result = 0;

        if (rx_queue) {
                mpipe_dp_enter(rx_queue->q.priv);
                if (likely(rx_queue->q.link_status))
                        result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
                mpipe_dp_exit(rx_queue->q.priv);
        }

        return result;
}

static uint16_t
mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct mpipe_tx_queue *tx_queue = _txq;
        uint16_t result = 0;

        if (tx_queue) {
                mpipe_dp_enter(tx_queue->q.priv);
                if (likely(tx_queue->q.link_status))
                        result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
                mpipe_dp_exit(tx_queue->q.priv);
        }

        return result;
}

static int
mpipe_link_mac(const char *ifname, uint8_t *mac)
{
        int rc, idx;
        char name[GXIO_MPIPE_LINK_NAME_LEN];

        for (idx = 0, rc = 0; !rc; idx++) {
                rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
                if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
                        return 0;
        }
        return -ENODEV;
}

static int
rte_pmd_mpipe_devinit(const char *ifname,
                      const char *params __rte_unused)
{
        gxio_mpipe_context_t *context;
        struct rte_eth_dev *eth_dev;
        struct mpipe_dev_priv *priv;
        int instance, rc;
        uint8_t *mac;

        /* Get the mPIPE instance that the device belongs to. */
        instance = gxio_mpipe_link_instance(ifname);
        context = mpipe_context(instance);
        if (!context) {
                RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
                return -ENODEV;
        }

        priv = rte_zmalloc(NULL, sizeof(*priv), 0);
        if (!priv) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
                return -ENOMEM;
        }

        memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
        memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
        priv->context = context;
        priv->instance = instance;
        priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
        priv->channel = -1;

        mac = priv->mac_addr.addr_bytes;
        rc = mpipe_link_mac(ifname, mac);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
                rte_free(priv);
                return -ENODEV;
        }

        eth_dev = rte_eth_dev_allocate(ifname, RTE_ETH_DEV_VIRTUAL);
        if (!eth_dev) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
                rte_free(priv);
                return -ENOMEM;
        }

        RTE_LOG(INFO, PMD, "%s: Initialized mpipe device "
                "(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
                ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

        priv->eth_dev = eth_dev;
        priv->port_id = eth_dev->data->port_id;
        eth_dev->data->dev_private = priv;
        eth_dev->data->mac_addrs = &priv->mac_addr;

        eth_dev->data->dev_flags = 0;
        eth_dev->data->kdrv = RTE_KDRV_NONE;
        eth_dev->driver = NULL;
        eth_dev->data->drv_name = drivername;
        eth_dev->data->numa_node = instance;

        eth_dev->dev_ops      = &mpipe_dev_ops;
        eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
        eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;

        return 0;
}

static struct rte_driver pmd_mpipe_xgbe_drv = {
        .name = "xgbe",
        .type = PMD_VDEV,
        .init = rte_pmd_mpipe_devinit,
};

static struct rte_driver pmd_mpipe_gbe_drv = {
        .name = "gbe",
        .type = PMD_VDEV,
        .init = rte_pmd_mpipe_devinit,
};

PMD_REGISTER_DRIVER(pmd_mpipe_xgbe_drv);
PMD_REGISTER_DRIVER(pmd_mpipe_gbe_drv);
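
/*
 * Usage (sketch, assuming the standard EAL vdev syntax of this DPDK
 * release): ports are created by naming the mPIPE link on the command
 * line, e.g. "--vdev=xgbe0" or "--vdev=gbe1".  The "xgbe"/"gbe" prefix
 * selects one of the drivers registered above, and the full string is
 * passed to rte_pmd_mpipe_devinit() as the link name.
 */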

static void __attribute__((constructor, used))
mpipe_init_contexts(void)
{
        struct mpipe_context *context;
        int rc, instance;

        for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
                context = &mpipe_contexts[instance];

                rte_spinlock_init(&context->lock);
                rc = gxio_mpipe_init(&context->context, instance);
                if (rc < 0)
                        break;
        }

        mpipe_instances = instance;
}