drivers/net/mpipe/mpipe_tilegx.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of EZchip Semiconductor nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <unistd.h>

#include <rte_eal.h>
#include <rte_vdev.h>
#include <rte_eal_memconfig.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_cycles.h>

#include <arch/mpipe_xaui_def.h>
#include <arch/mpipe_gbe_def.h>

#include <gxio/mpipe.h>

#ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
#define PMD_DEBUG_RX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
#define PMD_DEBUG_TX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
#else
#define PMD_DEBUG_RX(...)
#define PMD_DEBUG_TX(...)
#endif

#define MPIPE_MAX_CHANNELS              128
#define MPIPE_TX_MAX_QUEUES             128
#define MPIPE_RX_MAX_QUEUES             16
#define MPIPE_TX_DESCS                  512
#define MPIPE_RX_BUCKETS                256
#define MPIPE_RX_STACK_SIZE             65536
#define MPIPE_RX_IP_ALIGN               2
#define MPIPE_BSM_ALIGN                 128

#define MPIPE_LINK_UPDATE_TIMEOUT       10      /*  s */
#define MPIPE_LINK_UPDATE_INTERVAL      100000  /* us */

struct mpipe_channel_config {
        int enable;
        int first_bucket;
        int num_buckets;
        int head_room;
        gxio_mpipe_rules_stacks_t stacks;
};

struct mpipe_context {
        rte_spinlock_t        lock;
        gxio_mpipe_context_t  context;
        struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
};

/* Per-core local data. */
struct mpipe_local {
        int mbuf_push_debt[RTE_MAX_ETHPORTS];   /* Buffer push debt. */
} __rte_cache_aligned;

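/*
 * Buffer "push debt": on receive, up to MPIPE_BUF_DEBT_THRESHOLD buffers per
 * port may be consumed from the hardware buffer stack without pushing
 * replacements right away.  The debt is repaid on transmit by setting the
 * "hwb" bit on egress descriptors, which makes the hardware return the
 * transmitted buffer to the stack instead of leaving it for software to
 * complete.
 */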
#define MPIPE_BUF_DEBT_THRESHOLD        32
static __thread struct mpipe_local mpipe_local;
static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
static int mpipe_instances;

/* Per queue statistics. */
struct mpipe_queue_stats {
        uint64_t packets, bytes, errors, nomem;
};

/* Common tx/rx queue fields. */
struct mpipe_queue {
        struct mpipe_dev_priv *priv;    /* "priv" data of its device. */
        uint16_t nb_desc;               /* Number of descriptors. */
        uint16_t port_id;               /* Device index. */
        uint16_t stat_idx;              /* Queue stats index. */
        uint8_t queue_idx;              /* Queue index. */
        uint8_t link_status;            /* 0 = link down. */
        struct mpipe_queue_stats stats; /* Stat data for the queue. */
};

/* Transmit queue description. */
struct mpipe_tx_queue {
        struct mpipe_queue q;           /* Common stuff. */
};

/* Receive queue description. */
struct mpipe_rx_queue {
        struct mpipe_queue q;           /* Common stuff. */
        gxio_mpipe_iqueue_t iqueue;     /* mPIPE iqueue. */
        gxio_mpipe_idesc_t *next_desc;  /* Next idesc to process. */
        int avail_descs;                /* Number of available descs. */
        void *rx_ring_mem;              /* DMA ring memory. */
};

struct mpipe_dev_priv {
        gxio_mpipe_context_t *context;  /* mPIPE context. */
        gxio_mpipe_link_t link;         /* mPIPE link for the device. */
        gxio_mpipe_equeue_t equeue;     /* mPIPE equeue. */
        unsigned equeue_size;           /* mPIPE equeue desc count. */
        int instance;                   /* mPIPE instance. */
        int ering;                      /* mPIPE eDMA ring. */
        int stack;                      /* mPIPE buffer stack. */
        int channel;                    /* Device channel. */
        int port_id;                    /* DPDK port index. */
        struct rte_eth_dev *eth_dev;    /* DPDK device. */
        struct rte_mbuf **tx_comps;     /* TX completion array. */
        struct rte_mempool *rx_mpool;   /* mpool used by the rx queues. */
        unsigned rx_offset;             /* Receive head room. */
        unsigned rx_size_code;          /* mPIPE rx buffer size code. */
        int is_xaui:1,                  /* Is this an xgbe or gbe? */
            initialized:1,              /* Initialized port? */
            running:1;                  /* Running port? */
        struct ether_addr mac_addr;     /* MAC address. */
        unsigned nb_rx_queues;          /* Configured rx queues. */
        unsigned nb_tx_queues;          /* Configured tx queues. */
        int first_bucket;               /* mPIPE bucket start index. */
        int first_ring;                 /* mPIPE notif ring start index. */
        int notif_group;                /* mPIPE notif group. */
        rte_atomic32_t dp_count __rte_cache_aligned;    /* DP Entry count. */
        int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
        int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
};

#define mpipe_priv(dev)                 \
        ((struct mpipe_dev_priv*)(dev)->data->dev_private)

#define mpipe_name(priv)                \
        ((priv)->eth_dev->data->name)

#define mpipe_rx_queue(priv, n)         \
        ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])

#define mpipe_tx_queue(priv, n)         \
        ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])

static void
mpipe_xmit_flush(struct mpipe_dev_priv *priv);

static void
mpipe_recv_flush(struct mpipe_dev_priv *priv);

static int mpipe_equeue_sizes[] = {
        [GXIO_MPIPE_EQUEUE_ENTRY_512]   = 512,
        [GXIO_MPIPE_EQUEUE_ENTRY_2K]    = 2048,
        [GXIO_MPIPE_EQUEUE_ENTRY_8K]    = 8192,
        [GXIO_MPIPE_EQUEUE_ENTRY_64K]   = 65536,
};

static int mpipe_iqueue_sizes[] = {
        [GXIO_MPIPE_IQUEUE_ENTRY_128]   = 128,
        [GXIO_MPIPE_IQUEUE_ENTRY_512]   = 512,
        [GXIO_MPIPE_IQUEUE_ENTRY_2K]    = 2048,
        [GXIO_MPIPE_IQUEUE_ENTRY_64K]   = 65536,
};

static int mpipe_buffer_sizes[] = {
        [GXIO_MPIPE_BUFFER_SIZE_128]    = 128,
        [GXIO_MPIPE_BUFFER_SIZE_256]    = 256,
        [GXIO_MPIPE_BUFFER_SIZE_512]    = 512,
        [GXIO_MPIPE_BUFFER_SIZE_1024]   = 1024,
        [GXIO_MPIPE_BUFFER_SIZE_1664]   = 1664,
        [GXIO_MPIPE_BUFFER_SIZE_4096]   = 4096,
        [GXIO_MPIPE_BUFFER_SIZE_10368]  = 10368,
        [GXIO_MPIPE_BUFFER_SIZE_16384]  = 16384,
};

static gxio_mpipe_context_t *
mpipe_context(int instance)
{
        if (instance < 0 || instance >= mpipe_instances)
                return NULL;
        return &mpipe_contexts[instance].context;
}

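/*
 * (Re)program the classifier rules for one channel.  mPIPE rules can only be
 * committed as a complete list, so the rules for every enabled channel of
 * the instance are replayed whenever a single channel's config changes.
 */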
static int mpipe_channel_config(int instance, int channel,
                                struct mpipe_channel_config *config)
{
        struct mpipe_channel_config *data;
        struct mpipe_context *context;
        gxio_mpipe_rules_t rules;
        int idx, rc = 0;

        if (instance < 0 || instance >= mpipe_instances ||
            channel < 0 || channel >= MPIPE_MAX_CHANNELS)
                return -EINVAL;

        context = &mpipe_contexts[instance];

        rte_spinlock_lock(&context->lock);

        gxio_mpipe_rules_init(&rules, &context->context);

        for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
                data = (channel == idx) ? config : &context->channels[idx];

                if (!data->enable)
                        continue;

                rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
                                            data->num_buckets, &data->stacks);
                if (rc < 0) {
                        goto done;
                }

                rc = gxio_mpipe_rules_add_channel(&rules, idx);
                if (rc < 0) {
                        goto done;
                }

                rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
                if (rc < 0) {
                        goto done;
                }
        }

        rc = gxio_mpipe_rules_commit(&rules);
        if (rc == 0) {
                memcpy(&context->channels[channel], config, sizeof(*config));
        }

done:
        rte_spinlock_unlock(&context->lock);

        return rc;
}

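/*
 * Map a requested size onto the table of sizes supported by the hardware.
 * With roundup, return the index of the smallest supported size that is at
 * least "size"; otherwise return the index of the last supported size seen
 * before that point.
 */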
static int
mpipe_get_size_index(int *array, int count, int size,
                     bool roundup)
{
        int i, last = -1;

        for (i = 0; i < count && array[i] < size; i++) {
                if (array[i])
                        last = i;
        }

        if (roundup)
                return i < count ? (int)i : -ENOENT;
        else
                return last >= 0 ? last : -ENOENT;
}

static int
mpipe_calc_size(int *array, int count, int size)
{
        int index = mpipe_get_size_index(array, count, size, 1);
        return index < 0 ? index : array[index];
}

static int mpipe_equeue_size(int size)
{
        int result;
        result = mpipe_calc_size(mpipe_equeue_sizes,
                                 RTE_DIM(mpipe_equeue_sizes), size);
        return result;
}

static int mpipe_iqueue_size(int size)
{
        int result;
        result = mpipe_calc_size(mpipe_iqueue_sizes,
                                 RTE_DIM(mpipe_iqueue_sizes), size);
        return result;
}

static int mpipe_buffer_size_index(int size)
{
        int result;
        result = mpipe_get_size_index(mpipe_buffer_sizes,
                                      RTE_DIM(mpipe_buffer_sizes), size, 0);
        return result;
}

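/*
 * The link status is kept in a 64-bit word that is read and published with a
 * single compare-and-set attempt, the same idiom used by other ethdev PMDs
 * of this era.  Both helpers return -1 if the cmpset loses a race and 0 on
 * success.
 */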
static inline int
mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
                                  struct rte_eth_link *link)
{
        struct rte_eth_link *dst = link;
        struct rte_eth_link *src = &(dev->data->dev_link);

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static inline int
mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
                                   struct rte_eth_link *link)
{
        struct rte_eth_link *dst = &(dev->data->dev_link);
        struct rte_eth_link *src = link;

        if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
                                *(uint64_t *)src) == 0)
                return -1;

        return 0;
}

static void
mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
                struct rte_eth_dev_info *dev_info)
{
        dev_info->min_rx_bufsize  = 128;
        dev_info->max_rx_pktlen   = 1518;
        dev_info->max_tx_queues   = MPIPE_TX_MAX_QUEUES;
        dev_info->max_rx_queues   = MPIPE_RX_MAX_QUEUES;
        dev_info->max_mac_addrs   = 1;
        dev_info->rx_offload_capa = 0;
        dev_info->tx_offload_capa = 0;
}

static int
mpipe_configure(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);

        if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
                RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
                        mpipe_name(priv), dev->data->nb_tx_queues,
                        MPIPE_TX_MAX_QUEUES);
                return -EINVAL;
        }
        priv->nb_tx_queues = dev->data->nb_tx_queues;

        if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
                RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
                        mpipe_name(priv), dev->data->nb_rx_queues,
                        MPIPE_RX_MAX_QUEUES);
                return -EINVAL;
        }
        priv->nb_rx_queues = dev->data->nb_rx_queues;

        return 0;
}

static inline int
mpipe_link_compare(struct rte_eth_link *link1,
                   struct rte_eth_link *link2)
{
        return (*(uint64_t *)link1 == *(uint64_t *)link2)
                ? -1 : 0;
}

static int
mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct rte_eth_link old, new;
        int64_t state, speed;
        int count, rc;

        memset(&old, 0, sizeof(old));
        memset(&new, 0, sizeof(new));
        mpipe_dev_atomic_read_link_status(dev, &old);

        for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
                if (!priv->initialized)
                        break;

                state = gxio_mpipe_link_get_attr(&priv->link,
                                                 GXIO_MPIPE_LINK_CURRENT_STATE);
                if (state < 0)
                        break;

                speed = state & GXIO_MPIPE_LINK_SPEED_MASK;

                new.link_autoneg = (dev->data->dev_conf.link_speeds &
                                ETH_LINK_SPEED_AUTONEG);
                if (speed == GXIO_MPIPE_LINK_1G) {
                        new.link_speed = ETH_SPEED_NUM_1G;
                        new.link_duplex = ETH_LINK_FULL_DUPLEX;
                        new.link_status = ETH_LINK_UP;
                } else if (speed == GXIO_MPIPE_LINK_10G) {
                        new.link_speed = ETH_SPEED_NUM_10G;
                        new.link_duplex = ETH_LINK_FULL_DUPLEX;
                        new.link_status = ETH_LINK_UP;
                }

                rc = mpipe_link_compare(&old, &new);
                if (rc == 0 || !wait_to_complete)
                        break;

                rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
        }

        mpipe_dev_atomic_write_link_status(dev, &new);
        return rc;
}

static int
mpipe_set_link(struct rte_eth_dev *dev, int up)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int rc;

        rc = gxio_mpipe_link_set_attr(&priv->link,
                                      GXIO_MPIPE_LINK_DESIRED_STATE,
                                      up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
                        mpipe_name(priv), up ? "up" : "down");
        } else {
                mpipe_link_update(dev, 0);
        }

        return rc;
}

static int
mpipe_set_link_up(struct rte_eth_dev *dev)
{
        return mpipe_set_link(dev, 1);
}

static int
mpipe_set_link_down(struct rte_eth_dev *dev)
{
        return mpipe_set_link(dev, 0);
}

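/*
 * Datapath reference counting: the rx/tx burst handlers bracket their work
 * with mpipe_dp_enter()/mpipe_dp_exit(), and mpipe_stop() uses
 * mpipe_dp_wait() to quiesce straggling datapath threads before tearing the
 * port down.  Entering the datapath also writes SPR_DSTREAM_PF to disable
 * the data-stream prefetcher for the calling thread.
 */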
static inline void
mpipe_dp_enter(struct mpipe_dev_priv *priv)
{
        __insn_mtspr(SPR_DSTREAM_PF, 0);
        rte_atomic32_inc(&priv->dp_count);
}

static inline void
mpipe_dp_exit(struct mpipe_dev_priv *priv)
{
        rte_atomic32_dec(&priv->dp_count);
}

static inline void
mpipe_dp_wait(struct mpipe_dev_priv *priv)
{
        while (rte_atomic32_read(&priv->dp_count) != 0) {
                rte_pause();
        }
}

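/*
 * Pick the buffer stack a transmitted mbuf should be released to: the stack
 * of its originating port when that port is known, or this port's own stack
 * otherwise.
 */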
static inline int
mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
{
        return (mbuf->port < RTE_MAX_ETHPORTS) ?
                mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
                priv->stack;
}

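/*
 * Reconstruct an rte_mbuf from an ingress descriptor.  The mbuf header sits
 * priv->rx_offset bytes below the packet data written by mPIPE.
 */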
static inline struct rte_mbuf *
mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
                int in_port)
{
        void *va = gxio_mpipe_idesc_get_va(idesc);
        uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
        struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);

        rte_pktmbuf_reset(mbuf);
        mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
        mbuf->port     = in_port;
        mbuf->data_len = size;
        mbuf->pkt_len  = size;
        mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);

        PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
                     mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);

        return mbuf;
}

static inline void
mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
{
        const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
        void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);

        gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
        PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
                     mpipe_name(priv), mbuf, buf_addr, priv->stack);
}

static inline void
mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
{
        struct rte_mbuf *mbuf;
        int i;

        for (i = 0; i < count; i++) {
                mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
                if (!mbuf)
                        break;
                mpipe_recv_push(priv, mbuf);
        }

        PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
}

static inline void
mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
{
        const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
        uint8_t in_port = priv->port_id;
        struct rte_mbuf *mbuf;
        void *va;

        while (1) {
                va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
                if (!va)
                        break;
                mbuf = RTE_PTR_SUB(va, offset);

                PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
                             mpipe_name(priv), mbuf, va);

                mbuf->data_off    = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
                mbuf->refcnt      = 1;
                mbuf->nb_segs     = 1;
                mbuf->port        = in_port;
                mbuf->packet_type = 0;
                mbuf->data_len    = 0;
                mbuf->pkt_len     = 0;

                __rte_mbuf_raw_free(mbuf);
        }
}

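/*
 * Register a hugepage-backed memory segment with mPIPE, one hugepage at a
 * time, so the hardware can use buffers located anywhere in the segment.
 */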
static void
mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
{
        size_t size = ms->hugepage_sz;
        uint8_t *addr, *end;
        int rc;

        for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
                rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
                                              size, 0);
                if (rc < 0)
                        break;
        }

        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
                        mpipe_name(priv), ms->addr, rc);
        } else {
                RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
                        mpipe_name(priv), ms->addr,
                        RTE_PTR_ADD(ms->addr, ms->len - 1));
        }
}

static int
mpipe_recv_init(struct mpipe_dev_priv *priv)
{
        const struct rte_memseg *seg = rte_eal_get_physmem_layout();
        size_t stack_size;
        void *stack_mem;
        int rc;

        if (!priv->rx_mpool) {
                RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
                        mpipe_name(priv));
                return -ENODEV;
        }

        /* Allocate one NotifRing for each queue. */
        rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
                                          0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->first_ring = rc;

        /* Allocate a NotifGroup. */
        rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->notif_group = rc;

        /* Allocate required buckets. */
        rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->first_bucket = rc;

        rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->stack = rc;

        while (seg && seg->addr)
                mpipe_register_segment(priv, seg++);

        stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
        stack_mem = rte_zmalloc(NULL, stack_size, 65536);
        if (!stack_mem) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        } else {
                RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
                        mpipe_name(priv), stack_mem,
                        RTE_PTR_ADD(stack_mem, stack_size - 1));
        }

        rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
                                          priv->rx_size_code, stack_mem,
                                          stack_size, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
                        mpipe_name(priv));
                return rc;
        }

        return 0;
}

static int
mpipe_xmit_init(struct mpipe_dev_priv *priv)
{
        size_t ring_size;
        void *ring_mem;
        int rc;

        /* Allocate eDMA ring. */
        rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->ering = rc;

        rc = mpipe_equeue_size(MPIPE_TX_DESCS);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
                        mpipe_name(priv), (int)MPIPE_TX_DESCS);
                return -ENOMEM;
        }
        priv->equeue_size = rc;

        /* Initialize completion array. */
        ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
        priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
        if (!priv->tx_comps) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        /* Allocate eDMA ring memory. */
        ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
        ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
        if (!ring_mem) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        } else {
                RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
                        mpipe_name(priv), ring_mem,
                        RTE_PTR_ADD(ring_mem, ring_size - 1));
        }

        /* Initialize eDMA ring. */
        rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
                                    priv->channel, ring_mem, ring_size, 0);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
                        mpipe_name(priv));
                return rc;
        }

        return 0;
}

static int
mpipe_link_init(struct mpipe_dev_priv *priv)
{
        int rc;

        /* Open the link. */
        rc = gxio_mpipe_link_open(&priv->link, priv->context,
                                  mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Get the channel index. */
        rc = gxio_mpipe_link_channel(&priv->link);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Bad channel\n",
                        mpipe_name(priv));
                return rc;
        }
        priv->channel = rc;

        return 0;
}

static int
mpipe_init(struct mpipe_dev_priv *priv)
{
        int rc;

        if (priv->initialized)
                return 0;

        rc = mpipe_recv_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
                        mpipe_name(priv));
                return rc;
        }

        rc = mpipe_xmit_init(priv);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
                        mpipe_name(priv));
                rte_free(priv);
                return rc;
        }

        priv->initialized = 1;

        return 0;
}

static int
mpipe_start(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_channel_config config;
        struct mpipe_rx_queue *rx_queue;
        struct rte_eth_link eth_link;
        unsigned queue, buffers = 0;
        size_t ring_size;
        void *ring_mem;
        int rc;

        memset(&eth_link, 0, sizeof(eth_link));
        mpipe_dev_atomic_write_link_status(dev, &eth_link);

        rc = mpipe_init(priv);
        if (rc < 0)
                return rc;

        /* Initialize NotifRings. */
        for (queue = 0; queue < priv->nb_rx_queues; queue++) {
                rx_queue = mpipe_rx_queue(priv, queue);
                ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);

                ring_mem = rte_malloc(NULL, ring_size, ring_size);
                if (!ring_mem) {
                        RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
                                mpipe_name(priv));
                        return -ENOMEM;
                } else {
                        RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
                                mpipe_name(priv), queue, ring_mem,
                                RTE_PTR_ADD(ring_mem, ring_size - 1));
                }

                rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
                                            priv->first_ring + queue, ring_mem,
                                            ring_size, 0);
                if (rc < 0) {
                        RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
                                mpipe_name(priv));
                        return rc;
                }

                rx_queue->rx_ring_mem = ring_mem;
                buffers += rx_queue->q.nb_desc;
        }

        /* Initialize ingress NotifGroup and buckets. */
        rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
                        priv->notif_group, priv->first_ring, priv->nb_rx_queues,
                        priv->first_bucket, MPIPE_RX_BUCKETS,
                        GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Configure the classifier to deliver packets from this port. */
        config.enable = 1;
        config.first_bucket = priv->first_bucket;
        config.num_buckets = MPIPE_RX_BUCKETS;
        memset(&config.stacks, 0xff, sizeof(config.stacks));
        config.stacks.stacks[priv->rx_size_code] = priv->stack;
        config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;

        rc = mpipe_channel_config(priv->instance, priv->channel,
                                  &config);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
                        mpipe_name(priv));
                return rc;
        }

        /* Fill empty buffers into the buffer stack. */
        mpipe_recv_fill_stack(priv, buffers);

        /* Bring up the link. */
        mpipe_set_link_up(dev);

        /* Start xmit/recv on queues. */
        for (queue = 0; queue < priv->nb_tx_queues; queue++)
                mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
        for (queue = 0; queue < priv->nb_rx_queues; queue++)
                mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
        priv->running = 1;

        return 0;
}

static void
mpipe_stop(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_channel_config config;
        unsigned queue;
        int rc;

        for (queue = 0; queue < priv->nb_tx_queues; queue++)
                mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
        for (queue = 0; queue < priv->nb_rx_queues; queue++)
                mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;

        /* Make sure the link_status writes land. */
        rte_wmb();

        /*
         * Wait for link_status change to register with straggling datapath
         * threads.
         */
        mpipe_dp_wait(priv);

        /* Bring down the link. */
        mpipe_set_link_down(dev);

        /* Remove classifier rules. */
        memset(&config, 0, sizeof(config));
        rc = mpipe_channel_config(priv->instance, priv->channel,
                                  &config);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
                        mpipe_name(priv));
        }

        /* Flush completed xmit packets. */
        mpipe_xmit_flush(priv);

        /* Flush buffer stacks. */
        mpipe_recv_flush(priv);

        priv->running = 0;
}

static void
mpipe_close(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        if (priv->running)
                mpipe_stop(dev);
}

static void
mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_tx_queue *tx_queue;
        struct mpipe_rx_queue *rx_queue;
        unsigned i;
        uint16_t idx;

        memset(stats, 0, sizeof(*stats));

        for (i = 0; i < priv->nb_tx_queues; i++) {
                tx_queue = mpipe_tx_queue(priv, i);

                stats->opackets += tx_queue->q.stats.packets;
                stats->obytes   += tx_queue->q.stats.bytes;
                stats->oerrors  += tx_queue->q.stats.errors;

                idx = tx_queue->q.stat_idx;
                if (idx != (uint16_t)-1) {
                        stats->q_opackets[idx] += tx_queue->q.stats.packets;
                        stats->q_obytes[idx]   += tx_queue->q.stats.bytes;
                        stats->q_errors[idx]   += tx_queue->q.stats.errors;
                }
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                rx_queue = mpipe_rx_queue(priv, i);

                stats->ipackets  += rx_queue->q.stats.packets;
                stats->ibytes    += rx_queue->q.stats.bytes;
                stats->ierrors   += rx_queue->q.stats.errors;
                stats->rx_nombuf += rx_queue->q.stats.nomem;

                idx = rx_queue->q.stat_idx;
                if (idx != (uint16_t)-1) {
                        stats->q_ipackets[idx] += rx_queue->q.stats.packets;
                        stats->q_ibytes[idx]   += rx_queue->q.stats.bytes;
                        stats->q_errors[idx]   += rx_queue->q.stats.errors;
                }
        }
}

static void
mpipe_stats_reset(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        struct mpipe_tx_queue *tx_queue;
        struct mpipe_rx_queue *rx_queue;
        unsigned i;

        for (i = 0; i < priv->nb_tx_queues; i++) {
                tx_queue = mpipe_tx_queue(priv, i);
                memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
        }

        for (i = 0; i < priv->nb_rx_queues; i++) {
                rx_queue = mpipe_rx_queue(priv, i);
                memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
        }
}

static int
mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
                              uint8_t stat_idx, uint8_t is_rx)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);

        if (is_rx) {
                priv->rx_stat_mapping[stat_idx] = queue_id;
        } else {
                priv->tx_stat_mapping[stat_idx] = queue_id;
        }

        return 0;
}

static int
mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                     uint16_t nb_desc, unsigned int socket_id __rte_unused,
                     const struct rte_eth_txconf *tx_conf __rte_unused)
{
        struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        uint16_t idx;

        tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
                               RTE_CACHE_LINE_SIZE);
        if (!tx_queue) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        memset(&tx_queue->q, 0, sizeof(tx_queue->q));
        tx_queue->q.priv = priv;
        tx_queue->q.queue_idx = queue_idx;
        tx_queue->q.port_id = dev->data->port_id;
        tx_queue->q.nb_desc = nb_desc;

        tx_queue->q.stat_idx = -1;
        for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
                if (priv->tx_stat_mapping[idx] == queue_idx)
                        tx_queue->q.stat_idx = idx;
        }

        dev->data->tx_queues[queue_idx] = tx_queue;

        return 0;
}

static void
mpipe_tx_queue_release(void *_txq)
{
        rte_free(_txq);
}

static int
mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                     uint16_t nb_desc, unsigned int socket_id __rte_unused,
                     const struct rte_eth_rxconf *rx_conf __rte_unused,
                     struct rte_mempool *mp)
{
        struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        uint16_t idx;
        int size, rc;

        rc = mpipe_iqueue_size(nb_desc);
        if (rc < 0) {
                RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
                        mpipe_name(priv), (int)nb_desc);
                return -ENOMEM;
        }

        if (rc != nb_desc) {
                RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
                        mpipe_name(priv), (int)nb_desc, rc);
                nb_desc = rc;
        }

        size = sizeof(*rx_queue);
        rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
        if (!rx_queue) {
                RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
                        mpipe_name(priv));
                return -ENOMEM;
        }

        memset(&rx_queue->q, 0, sizeof(rx_queue->q));
        rx_queue->q.priv = priv;
        rx_queue->q.nb_desc = nb_desc;
        rx_queue->q.port_id = dev->data->port_id;
        rx_queue->q.queue_idx = queue_idx;

        if (!priv->rx_mpool) {
                int size = (rte_pktmbuf_data_room_size(mp) -
                            RTE_PKTMBUF_HEADROOM -
                            MPIPE_RX_IP_ALIGN);

                priv->rx_offset = (sizeof(struct rte_mbuf) +
                                   rte_pktmbuf_priv_size(mp) +
                                   RTE_PKTMBUF_HEADROOM +
                                   MPIPE_RX_IP_ALIGN);
                if (size < 0) {
                        RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
                                mpipe_name(priv),
                                rte_pktmbuf_data_room_size(mp));
                        return -ENOMEM;
                }

                priv->rx_size_code = mpipe_buffer_size_index(size);
                priv->rx_mpool = mp;
        }

        if (priv->rx_mpool != mp) {
                RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
                        mpipe_name(priv));
        }

        rx_queue->q.stat_idx = -1;
        for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
                if (priv->rx_stat_mapping[idx] == queue_idx)
                        rx_queue->q.stat_idx = idx;
        }

        dev->data->rx_queues[queue_idx] = rx_queue;

        return 0;
}

static void
mpipe_rx_queue_release(void *_rxq)
{
        rte_free(_rxq);
}

#define MPIPE_XGBE_ENA_HASH_MULTI       \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
#define MPIPE_XGBE_ENA_HASH_UNI         \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
#define MPIPE_XGBE_COPY_ALL             \
        (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
#define MPIPE_GBE_ENA_MULTI_HASH        \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
#define MPIPE_GBE_ENA_UNI_HASH          \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
#define MPIPE_GBE_COPY_ALL              \
        (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)

static void
mpipe_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int64_t reg;
        int addr;

        if (priv->is_xaui) {
                addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
                reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
                reg |=  MPIPE_XGBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        } else {
                addr = MPIPE_GBE_NETWORK_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
                reg &= ~MPIPE_GBE_ENA_UNI_HASH;
                reg |=  MPIPE_GBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        }
}

static void
mpipe_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct mpipe_dev_priv *priv = mpipe_priv(dev);
        int64_t reg;
        int addr;

        if (priv->is_xaui) {
                addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg |=  MPIPE_XGBE_ENA_HASH_MULTI;
                reg |=  MPIPE_XGBE_ENA_HASH_UNI;
                reg &= ~MPIPE_XGBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        } else {
                addr = MPIPE_GBE_NETWORK_CONFIGURATION;
                reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
                reg |=  MPIPE_GBE_ENA_MULTI_HASH;
                reg |=  MPIPE_GBE_ENA_UNI_HASH;
                reg &= ~MPIPE_GBE_COPY_ALL;
                gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
        }
}

static const struct eth_dev_ops mpipe_dev_ops = {
        .dev_infos_get           = mpipe_infos_get,
        .dev_configure           = mpipe_configure,
        .dev_start               = mpipe_start,
        .dev_stop                = mpipe_stop,
        .dev_close               = mpipe_close,
        .stats_get               = mpipe_stats_get,
        .stats_reset             = mpipe_stats_reset,
        .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
        .tx_queue_setup          = mpipe_tx_queue_setup,
        .rx_queue_setup          = mpipe_rx_queue_setup,
        .tx_queue_release        = mpipe_tx_queue_release,
        .rx_queue_release        = mpipe_rx_queue_release,
        .link_update             = mpipe_link_update,
        .dev_set_link_up         = mpipe_set_link_up,
        .dev_set_link_down       = mpipe_set_link_down,
        .promiscuous_enable      = mpipe_promiscuous_enable,
        .promiscuous_disable     = mpipe_promiscuous_disable,
};

static inline void
mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
{
        gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        int64_t slot;

        for (slot = start; slot < end; slot++) {
                gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
        }
}

static void
mpipe_xmit_flush(struct mpipe_dev_priv *priv)
{
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        int64_t slot;

        /* Post a dummy descriptor and wait for its return. */
        slot = gxio_mpipe_equeue_reserve(equeue, 1);
        if (slot < 0) {
                RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
                        mpipe_name(priv));
                return;
        }

        mpipe_xmit_null(priv, slot, slot + 1);

        while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
                rte_pause();
        }

        for (slot = 0; slot < priv->equeue_size; slot++) {
                if (priv->tx_comps[slot])
                        rte_pktmbuf_free_seg(priv->tx_comps[slot]);
        }
}

static void
mpipe_recv_flush(struct mpipe_dev_priv *priv)
{
        uint8_t in_port = priv->port_id;
        struct mpipe_rx_queue *rx_queue;
        gxio_mpipe_iqueue_t *iqueue;
        gxio_mpipe_idesc_t idesc;
        struct rte_mbuf *mbuf;
        unsigned queue;

        /* Release packets on the buffer stack. */
        mpipe_recv_flush_stack(priv);

        /* Flush packets sitting in recv queues. */
        for (queue = 0; queue < priv->nb_rx_queues; queue++) {
                rx_queue = mpipe_rx_queue(priv, queue);
                iqueue = &rx_queue->iqueue;
                while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
                        /* Skip idesc with the 'buffer error' bit set. */
                        if (idesc.be)
                                continue;
                        mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
                        rte_pktmbuf_free(mbuf);
                }
                rte_free(rx_queue->rx_ring_mem);
        }
}

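/*
 * Transmit burst: reserve a batch of eDMA slots, pack one descriptor per
 * mbuf segment, and lazily free previously completed mbufs tracked in the
 * tx_comps array.  Buffer push debt accrued on receive is repaid here by
 * letting the hardware return buffers to the stack (hwb) instead of
 * completing them in software.
 */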
static inline uint16_t
mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
              uint16_t nb_pkts)
{
        struct mpipe_dev_priv *priv = tx_queue->q.priv;
        gxio_mpipe_equeue_t *equeue = &priv->equeue;
        unsigned nb_bytes = 0;
        unsigned nb_sent = 0;
        int nb_slots, i;
        uint8_t port_id;

        PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
                     nb_pkts, mpipe_name(tx_queue->q.priv),
                     tx_queue->q.queue_idx);

        /* Optimistic assumption that we need exactly one slot per packet. */
        nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);

        do {
                struct rte_mbuf *mbuf = NULL, *pkt = NULL;
                int64_t slot;

                /* Reserve eDMA ring slots. */
                slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
                if (unlikely(slot < 0)) {
                        break;
                }

                for (i = 0; i < nb_slots; i++) {
                        unsigned idx = (slot + i) & (priv->equeue_size - 1);
                        rte_prefetch0(priv->tx_comps[idx]);
                }

                /* Fill up slots with descriptor and completion info. */
                for (i = 0; i < nb_slots; i++) {
                        unsigned idx = (slot + i) & (priv->equeue_size - 1);
                        gxio_mpipe_edesc_t desc;
                        struct rte_mbuf *next;

                        /* Starting on a new packet? */
                        if (likely(!mbuf)) {
                                int room = nb_slots - i;

                                pkt = mbuf = tx_pkts[nb_sent];

                                /* Bail out if we run out of descs. */
                                if (unlikely(pkt->nb_segs > room))
                                        break;

                                nb_sent++;
                        }

                        /* We have a segment to send. */
                        next = mbuf->next;

                        if (priv->tx_comps[idx])
                                rte_pktmbuf_free_seg(priv->tx_comps[idx]);

                        port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
                                                mbuf->port : priv->port_id;
                        desc = (gxio_mpipe_edesc_t) { {
                                .va        = rte_pktmbuf_mtod(mbuf, uintptr_t),
                                .xfer_size = rte_pktmbuf_data_len(mbuf),
                                .bound     = next ? 0 : 1,
                                .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
                                .size      = priv->rx_size_code,
                        } };
                        if (mpipe_local.mbuf_push_debt[port_id] > 0) {
                                mpipe_local.mbuf_push_debt[port_id]--;
                                desc.hwb = 1;
                                priv->tx_comps[idx] = NULL;
                        } else
                                priv->tx_comps[idx] = mbuf;

                        nb_bytes += mbuf->data_len;
                        gxio_mpipe_equeue_put_at(equeue, desc, slot + i);

                        PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
                                     mpipe_name(priv),
                                     tx_queue->q.queue_idx,
                                     rte_pktmbuf_mtod(mbuf, void *),
                                     rte_pktmbuf_data_len(mbuf));

                        mbuf = next;
                }

                if (unlikely(nb_sent < nb_pkts)) {

                        /* Fill remaining slots with null descriptors. */
                        mpipe_xmit_null(priv, slot + i, slot + nb_slots);

                        /*
                         * Calculate exact number of descriptors needed for
                         * the next go around.
                         */
                        nb_slots = 0;
                        for (i = nb_sent; i < nb_pkts; i++) {
                                nb_slots += tx_pkts[i]->nb_segs;
                        }

                        nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
                }
        } while (nb_sent < nb_pkts);

        tx_queue->q.stats.packets += nb_sent;
        tx_queue->q.stats.bytes   += nb_bytes;

        return nb_sent;
}

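/*
 * Receive burst: peek a run of ingress descriptors, prefetch ahead, drop
 * descriptors flagged with errors, and convert the rest into mbufs.  The
 * ring is credited in bursts without tracking buckets, which is safe only
 * because the driver always uses static flow affinity (see the comment at
 * the end of the loop).
 */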
static inline uint16_t
mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
              uint16_t nb_pkts)
{
        struct mpipe_dev_priv *priv = rx_queue->q.priv;
        gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
        gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
        uint8_t in_port = rx_queue->q.port_id;
        const unsigned look_ahead = 8;
        int room = nb_pkts, rc = 0;
        unsigned nb_packets = 0;
        unsigned nb_dropped = 0;
        unsigned nb_nomem = 0;
        unsigned nb_bytes = 0;
        unsigned nb_descs, i;

        while (room && !rc) {
                if (rx_queue->avail_descs < room) {
                        rc = gxio_mpipe_iqueue_try_peek(iqueue,
                                                        &rx_queue->next_desc);
                        rx_queue->avail_descs = rc < 0 ? 0 : rc;
                }

                if (unlikely(!rx_queue->avail_descs)) {
                        break;
                }

                nb_descs = RTE_MIN(room, rx_queue->avail_descs);

                first_idesc = rx_queue->next_desc;
                last_idesc  = first_idesc + nb_descs;

                rx_queue->next_desc   += nb_descs;
                rx_queue->avail_descs -= nb_descs;

                for (i = 1; i < look_ahead; i++) {
                        rte_prefetch0(first_idesc + i);
                }

                PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
                             mpipe_name(rx_queue->q.priv),
                             rx_queue->q.queue_idx,
                             nb_descs);

                for (idesc = first_idesc; idesc < last_idesc; idesc++) {
                        struct rte_mbuf *mbuf;

                        PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
                                     mpipe_name(priv),
                                     rx_queue->q.queue_idx,
                                     nb_packets, nb_descs);

                        rte_prefetch0(idesc + look_ahead);

                        PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
                                     "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
                                     mpipe_name(priv),
                                     rx_queue->q.queue_idx,
                                     idesc,
                                     idesc->me ? "me, " : "",
                                     idesc->tr ? "tr, " : "",
                                     idesc->ce ? "ce, " : "",
                                     idesc->ct ? "ct, " : "",
                                     idesc->cs ? "cs, " : "",
                                     idesc->nr ? "nr, " : "",
                                     idesc->sq ? "sq, " : "",
                                     idesc->ts ? "ts, " : "",
                                     idesc->ps ? "ps, " : "",
                                     idesc->be ? "be, " : "",
                                     idesc->l2_size,
                                     idesc->bucket_id,
                                     idesc->channel,
                                     idesc->notif_ring,
                                     (unsigned long)idesc->packet_sqn,
                                     (unsigned long)idesc->va);

                        if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
                                nb_dropped++;
                                gxio_mpipe_iqueue_drop(iqueue, idesc);
                                PMD_DEBUG_RX("%s:%d: Descriptor error\n",
                                             mpipe_name(rx_queue->q.priv),
                                             rx_queue->q.queue_idx);
                                continue;
                        }

                        if (mpipe_local.mbuf_push_debt[in_port] <
                                        MPIPE_BUF_DEBT_THRESHOLD)
                                mpipe_local.mbuf_push_debt[in_port]++;
                        else {
                                mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
                                if (unlikely(!mbuf)) {
                                        nb_nomem++;
                                        gxio_mpipe_iqueue_drop(iqueue, idesc);
                                        PMD_DEBUG_RX("%s:%d: alloc failure\n",
                                             mpipe_name(rx_queue->q.priv),
                                             rx_queue->q.queue_idx);
                                        continue;
                                }

                                mpipe_recv_push(priv, mbuf);
                        }

                        /* Get and setup the mbuf for the received packet. */
                        mbuf = mpipe_recv_mbuf(priv, idesc, in_port);

                        /* Update results and statistics counters. */
                        rx_pkts[nb_packets] = mbuf;
                        nb_bytes += mbuf->pkt_len;
                        nb_packets++;
                }

                /*
                 * We release the ring in bursts, but do not track and release
                 * buckets.  This therefore breaks dynamic flow affinity, but
                 * we always operate in static affinity mode, and so we're OK
                 * with this optimization.
                 */
                gxio_mpipe_iqueue_advance(iqueue, nb_descs);
                gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);

                /*
                 * Go around once more if we haven't yet peeked the queue, and
                 * if we have more room to receive.
                 */
                room = nb_pkts - nb_packets;
        }

        rx_queue->q.stats.packets += nb_packets;
        rx_queue->q.stats.bytes   += nb_bytes;
        rx_queue->q.stats.errors  += nb_dropped;
        rx_queue->q.stats.nomem   += nb_nomem;

        PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
                     mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
                     nb_packets, nb_bytes, nb_dropped, nb_nomem);

        return nb_packets;
}
1503
1504 static uint16_t
1505 mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1506 {
1507         struct mpipe_rx_queue *rx_queue = _rxq;
1508         uint16_t result = 0;
1509
1510         if (rx_queue) {
1511                 mpipe_dp_enter(rx_queue->q.priv);
1512                 if (likely(rx_queue->q.link_status))
1513                         result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
1514                 mpipe_dp_exit(rx_queue->q.priv);
1515         }
1516
1517         return result;
1518 }
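
/*
 * Both burst handlers revalidate the queue pointer and link status on
 * every call: the application may invoke the ethdev burst API before
 * the queues are set up or while the link is down.
 */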

static uint16_t
mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mpipe_tx_queue *tx_queue = _txq;
	uint16_t result = 0;

	if (tx_queue) {
		mpipe_dp_enter(tx_queue->q.priv);
		if (likely(tx_queue->q.link_status))
			result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
		mpipe_dp_exit(tx_queue->q.priv);
	}

	return result;
}
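
/*
 * A minimal usage sketch (not part of the driver): these handlers are
 * installed as the rx_pkt_burst/tx_pkt_burst hooks below and reached
 * through the generic ethdev burst API, e.g.:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *	uint16_t nt = rte_eth_tx_burst(port_id, 0, pkts, nb);
 */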

static int
mpipe_link_mac(const char *ifname, uint8_t *mac)
{
	int rc, idx;
	char name[GXIO_MPIPE_LINK_NAME_LEN];

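	/*
	 * Walk the mPIPE link table until enumeration fails, looking for
	 * an entry whose name matches the requested interface.
	 */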
	for (idx = 0, rc = 0; !rc; idx++) {
		rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
		if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
			return 0;
	}
	return -ENODEV;
}

static int
rte_pmd_mpipe_probe_common(struct rte_vdev_driver *drv, const char *ifname,
			   const char *params __rte_unused)
{
	gxio_mpipe_context_t *context;
	struct rte_eth_dev *eth_dev;
	struct mpipe_dev_priv *priv;
	int instance, rc;
	uint8_t *mac;

	/* Get the mPIPE instance that the device belongs to. */
	instance = gxio_mpipe_link_instance(ifname);
	context = mpipe_context(instance);
	if (!context) {
		RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
		return -ENODEV;
	}

	priv = rte_zmalloc(NULL, sizeof(*priv), 0);
	if (!priv) {
		RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
		return -ENOMEM;
	}

	memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
	memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
	priv->context = context;
	priv->instance = instance;
	priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
	priv->channel = -1;

	mac = priv->mac_addr.addr_bytes;
	rc = mpipe_link_mac(ifname, mac);
	if (rc < 0) {
		RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
		rte_free(priv);
		return -ENODEV;
	}

	eth_dev = rte_eth_dev_allocate(ifname);
	if (!eth_dev) {
		RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
		rte_free(priv);
		return -ENOMEM;
	}

	RTE_LOG(INFO, PMD, "%s: Initialized mpipe device "
		"(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
		ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	priv->eth_dev = eth_dev;
	priv->port_id = eth_dev->data->port_id;
	eth_dev->data->dev_private = priv;
	eth_dev->data->mac_addrs = &priv->mac_addr;

	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->driver = NULL;
	eth_dev->data->drv_name = drv->driver.name;
	eth_dev->data->numa_node = instance;

	eth_dev->dev_ops      = &mpipe_dev_ops;
	eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
	eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;

	rc = mpipe_link_init(priv);
	if (rc < 0) {
		RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
			mpipe_name(priv));
		/* Release the port and private data allocated above so
		 * a failed probe does not leak them. */
		rte_eth_dev_release_port(eth_dev);
		rte_free(priv);
		return rc;
	}

	return 0;
}

/*
 * The probe wrappers below take the address of their driver struct,
 * so declare the structs (defined after the wrappers) first.
 */
static struct rte_vdev_driver pmd_mpipe_xgbe_drv;
static struct rte_vdev_driver pmd_mpipe_gbe_drv;

static int
rte_pmd_mpipe_xgbe_probe(const char *ifname, const char *params __rte_unused)
{
	return rte_pmd_mpipe_probe_common(&pmd_mpipe_xgbe_drv, ifname, params);
}

static int
rte_pmd_mpipe_gbe_probe(const char *ifname, const char *params __rte_unused)
{
	return rte_pmd_mpipe_probe_common(&pmd_mpipe_gbe_drv, ifname, params);
}

static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
	.probe = rte_pmd_mpipe_xgbe_probe,
};

static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
	.probe = rte_pmd_mpipe_gbe_probe,
};

RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);
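
/*
 * A hedged usage sketch: with the registrations above, ports are
 * created from the EAL command line by naming the mPIPE link, e.g.
 * --vdev=xgbe0 or --vdev=gbe1, where the legacy "xgbe"/"gbe" prefixes
 * are matched through the aliases registered alongside the
 * net_mpipe_* driver names.
 */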
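
/*
 * Open every available mPIPE instance at load time.  Instances are
 * assumed to be numbered contiguously, so we stop at the first failed
 * init and record how many came up.
 */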
static void __attribute__((constructor, used))
mpipe_init_contexts(void)
{
	struct mpipe_context *context;
	int rc, instance;

	for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
		context = &mpipe_contexts[instance];

		rte_spinlock_init(&context->lock);
		rc = gxio_mpipe_init(&context->context, instance);
		if (rc < 0)
			break;
	}

	mpipe_instances = instance;
}