drivers/net/mpipe/mpipe_tilegx.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2015 EZchip Semiconductor Ltd. All rights reserved.
5  *
6  *   Redistribution and use in source and binary forms, with or without
7  *   modification, are permitted provided that the following conditions
8  *   are met:
9  *
10  *     * Redistributions of source code must retain the above copyright
11  *       notice, this list of conditions and the following disclaimer.
12  *     * Redistributions in binary form must reproduce the above copyright
13  *       notice, this list of conditions and the following disclaimer in
14  *       the documentation and/or other materials provided with the
15  *       distribution.
16  *     * Neither the name of EZchip Semiconductor nor the names of its
17  *       contributors may be used to endorse or promote products derived
18  *       from this software without specific prior written permission.
19  *
20  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31  */
32
33 #include <unistd.h>
34
35 #include <rte_eal.h>
36 #include <rte_vdev.h>
37 #include <rte_eal_memconfig.h>
38 #include <rte_ethdev.h>
39 #include <rte_malloc.h>
40 #include <rte_cycles.h>
41
42 #include <gxio/mpipe.h>
43
44 /* mPIPE GBE hardware register definitions. */
45 #define MPIPE_GBE_NETWORK_CONFIGURATION 0x8008
46 #define MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT 4
47 #define MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT 6
48 #define MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT 7
49
50 /* mPIPE XAUI hardware register definitions. */
51 #define MPIPE_XAUI_RECEIVE_CONFIGURATION 0x8020
52 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT 0
53 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT 2
54 #define MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT 3
55
56 #ifdef RTE_LIBRTE_MPIPE_PMD_DEBUG
57 #define PMD_DEBUG_RX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
58 #define PMD_DEBUG_TX(...)       RTE_LOG(DEBUG, PMD, __VA_ARGS__)
59 #else
60 #define PMD_DEBUG_RX(...)
61 #define PMD_DEBUG_TX(...)
62 #endif
63
64 #define MPIPE_MAX_CHANNELS              128
65 #define MPIPE_TX_MAX_QUEUES             128
66 #define MPIPE_RX_MAX_QUEUES             16
67 #define MPIPE_TX_DESCS                  512
68 #define MPIPE_RX_BUCKETS                256
69 #define MPIPE_RX_STACK_SIZE             65536
70 #define MPIPE_RX_IP_ALIGN               2
71 #define MPIPE_BSM_ALIGN                 128
72
73 #define MPIPE_LINK_UPDATE_TIMEOUT       10      /*  s */
74 #define MPIPE_LINK_UPDATE_INTERVAL      100000  /* us */
75
76 struct mpipe_channel_config {
77         int enable;
78         int first_bucket;
79         int num_buckets;
80         int head_room;
81         gxio_mpipe_rules_stacks_t stacks;
82 };
83
84 struct mpipe_context {
85         rte_spinlock_t        lock;
86         gxio_mpipe_context_t  context;
87         struct mpipe_channel_config channels[MPIPE_MAX_CHANNELS];
88 };
89
90 /* Per-core local data. */
91 struct mpipe_local {
92         int mbuf_push_debt[RTE_MAX_ETHPORTS];   /* Buffer push debt. */
93 } __rte_cache_aligned;
94
95 #define MPIPE_BUF_DEBT_THRESHOLD        32
96 static __thread struct mpipe_local mpipe_local;
97 static struct mpipe_context mpipe_contexts[GXIO_MPIPE_INSTANCE_MAX];
98 static int mpipe_instances;
99
100 /* Per queue statistics. */
101 struct mpipe_queue_stats {
102         uint64_t packets, bytes, errors, nomem;
103 };
104
105 /* Common tx/rx queue fields. */
106 struct mpipe_queue {
107         struct mpipe_dev_priv *priv;    /* "priv" data of its device. */
108         uint16_t nb_desc;               /* Number of descriptors. */
109         uint16_t port_id;               /* Device index. */
110         uint16_t stat_idx;              /* Queue stats index. */
111         uint8_t queue_idx;              /* Queue index. */
112         uint8_t link_status;            /* 0 = link down. */
113         struct mpipe_queue_stats stats; /* Stat data for the queue. */
114 };
115
116 /* Transmit queue description. */
117 struct mpipe_tx_queue {
118         struct mpipe_queue q;           /* Common stuff. */
119 };
120
121 /* Receive queue description. */
122 struct mpipe_rx_queue {
123         struct mpipe_queue q;           /* Common stuff. */
124         gxio_mpipe_iqueue_t iqueue;     /* mPIPE iqueue. */
125         gxio_mpipe_idesc_t *next_desc;  /* Next idesc to process. */
126         int avail_descs;                /* Number of available descs. */
127         void *rx_ring_mem;              /* DMA ring memory. */
128 };
129
130 struct mpipe_dev_priv {
131         gxio_mpipe_context_t *context;  /* mPIPE context. */
132         gxio_mpipe_link_t link;         /* mPIPE link for the device. */
133         gxio_mpipe_equeue_t equeue;     /* mPIPE equeue. */
134         unsigned equeue_size;           /* mPIPE equeue desc count. */
135         int instance;                   /* mPIPE instance. */
136         int ering;                      /* mPIPE eDMA ring. */
137         int stack;                      /* mPIPE buffer stack. */
138         int channel;                    /* Device channel. */
139         int port_id;                    /* DPDK port index. */
140         struct rte_eth_dev *eth_dev;    /* DPDK device. */
141         struct rte_mbuf **tx_comps;     /* TX completion array. */
142         struct rte_mempool *rx_mpool;   /* mpool used by the rx queues. */
143         unsigned rx_offset;             /* Receive head room. */
144         unsigned rx_size_code;          /* mPIPE rx buffer size code. */
145         int is_xaui:1,                  /* Is this an xgbe or gbe? */
146             initialized:1,              /* Initialized port? */
147             running:1;                  /* Running port? */
148         struct ether_addr mac_addr;     /* MAC address. */
149         unsigned nb_rx_queues;          /* Configured rx queues. */
150         unsigned nb_tx_queues;          /* Configured tx queues. */
151         int first_bucket;               /* mPIPE bucket start index. */
152         int first_ring;                 /* mPIPE notif ring start index. */
153         int notif_group;                /* mPIPE notif group. */
154         rte_atomic32_t dp_count __rte_cache_aligned;    /* DP Entry count. */
155         int tx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
156         int rx_stat_mapping[RTE_ETHDEV_QUEUE_STAT_CNTRS];
157 };
158
159 #define mpipe_priv(dev)                 \
160         ((struct mpipe_dev_priv*)(dev)->data->dev_private)
161
162 #define mpipe_name(priv)                \
163         ((priv)->eth_dev->data->name)
164
165 #define mpipe_rx_queue(priv, n)         \
166         ((struct mpipe_rx_queue *)(priv)->eth_dev->data->rx_queues[n])
167
168 #define mpipe_tx_queue(priv, n)         \
169         ((struct mpipe_tx_queue *)(priv)->eth_dev->data->tx_queues[n])
170
171 static void
172 mpipe_xmit_flush(struct mpipe_dev_priv *priv);
173
174 static void
175 mpipe_recv_flush(struct mpipe_dev_priv *priv);
176
177 static int mpipe_equeue_sizes[] = {
178         [GXIO_MPIPE_EQUEUE_ENTRY_512]   = 512,
179         [GXIO_MPIPE_EQUEUE_ENTRY_2K]    = 2048,
180         [GXIO_MPIPE_EQUEUE_ENTRY_8K]    = 8192,
181         [GXIO_MPIPE_EQUEUE_ENTRY_64K]   = 65536,
182 };
183
184 static int mpipe_iqueue_sizes[] = {
185         [GXIO_MPIPE_IQUEUE_ENTRY_128]   = 128,
186         [GXIO_MPIPE_IQUEUE_ENTRY_512]   = 512,
187         [GXIO_MPIPE_IQUEUE_ENTRY_2K]    = 2048,
188         [GXIO_MPIPE_IQUEUE_ENTRY_64K]   = 65536,
189 };
190
191 static int mpipe_buffer_sizes[] = {
192         [GXIO_MPIPE_BUFFER_SIZE_128]    = 128,
193         [GXIO_MPIPE_BUFFER_SIZE_256]    = 256,
194         [GXIO_MPIPE_BUFFER_SIZE_512]    = 512,
195         [GXIO_MPIPE_BUFFER_SIZE_1024]   = 1024,
196         [GXIO_MPIPE_BUFFER_SIZE_1664]   = 1664,
197         [GXIO_MPIPE_BUFFER_SIZE_4096]   = 4096,
198         [GXIO_MPIPE_BUFFER_SIZE_10368]  = 10368,
199         [GXIO_MPIPE_BUFFER_SIZE_16384]  = 16384,
200 };
201
202 static gxio_mpipe_context_t *
203 mpipe_context(int instance)
204 {
205         if (instance < 0 || instance >= mpipe_instances)
206                 return NULL;
207         return &mpipe_contexts[instance].context;
208 }
209
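/*
 * Rebuild and commit the classifier rule set for an mPIPE instance.  The
 * hardware takes one rule list covering every enabled channel, so the
 * cached per-channel configs are replayed, with 'config' standing in for
 * the entry belonging to 'channel'.  On a successful commit the new
 * config is cached for the next rebuild.
 */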
210 static int mpipe_channel_config(int instance, int channel,
211                                 struct mpipe_channel_config *config)
212 {
213         struct mpipe_channel_config *data;
214         struct mpipe_context *context;
215         gxio_mpipe_rules_t rules;
216         int idx, rc = 0;
217
218         if (instance < 0 || instance >= mpipe_instances ||
219             channel < 0 || channel >= MPIPE_MAX_CHANNELS)
220                 return -EINVAL;
221
222         context = &mpipe_contexts[instance];
223
224         rte_spinlock_lock(&context->lock);
225
226         gxio_mpipe_rules_init(&rules, &context->context);
227
228         for (idx = 0; idx < MPIPE_MAX_CHANNELS; idx++) {
229                 data = (channel == idx) ? config : &context->channels[idx];
230
231                 if (!data->enable)
232                         continue;
233
234                 rc = gxio_mpipe_rules_begin(&rules, data->first_bucket,
235                                             data->num_buckets, &data->stacks);
236                 if (rc < 0) {
237                         goto done;
238                 }
239
240                 rc = gxio_mpipe_rules_add_channel(&rules, idx);
241                 if (rc < 0) {
242                         goto done;
243                 }
244
245                 rc = gxio_mpipe_rules_set_headroom(&rules, data->head_room);
246                 if (rc < 0) {
247                         goto done;
248                 }
249         }
250
251         rc = gxio_mpipe_rules_commit(&rules);
252         if (rc == 0) {
253                 memcpy(&context->channels[channel], config, sizeof(*config));
254         }
255
256 done:
257         rte_spinlock_unlock(&context->lock);
258
259         return rc;
260 }
261
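/*
 * Map a requested size onto the table of sizes supported by the hardware.
 * With 'roundup' set, return the index of the smallest supported size that
 * is >= 'size'; otherwise return the index of the largest supported size
 * below it.  Returns -ENOENT if no table entry qualifies.
 */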
262 static int
263 mpipe_get_size_index(int *array, int count, int size,
264                      bool roundup)
265 {
266         int i, last = -1;
267
268         for (i = 0; i < count && array[i] < size; i++) {
269                 if (array[i])
270                         last = i;
271         }
272
273         if (roundup)
274                 return i < count ? (int)i : -ENOENT;
275         else
276                 return last >= 0 ? last : -ENOENT;
277 }
278
279 static int
280 mpipe_calc_size(int *array, int count, int size)
281 {
282         int index = mpipe_get_size_index(array, count, size, 1);
283         return index < 0 ? index : array[index];
284 }
285
286 static int mpipe_equeue_size(int size)
287 {
288         int result;
289         result = mpipe_calc_size(mpipe_equeue_sizes,
290                                  RTE_DIM(mpipe_equeue_sizes), size);
291         return result;
292 }
293
294 static int mpipe_iqueue_size(int size)
295 {
296         int result;
297         result = mpipe_calc_size(mpipe_iqueue_sizes,
298                                  RTE_DIM(mpipe_iqueue_sizes), size);
299         return result;
300 }
301
302 static int mpipe_buffer_size_index(int size)
303 {
304         int result;
305         result = mpipe_get_size_index(mpipe_buffer_sizes,
306                                       RTE_DIM(mpipe_buffer_sizes), size, 0);
307         return result;
308 }
309
310 static inline int
311 mpipe_dev_atomic_read_link_status(struct rte_eth_dev *dev,
312                                   struct rte_eth_link *link)
313 {
314         struct rte_eth_link *dst = link;
315         struct rte_eth_link *src = &(dev->data->dev_link);
316
317         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
318                                 *(uint64_t *)src) == 0)
319                 return -1;
320
321         return 0;
322 }
323
324 static inline int
325 mpipe_dev_atomic_write_link_status(struct rte_eth_dev *dev,
326                                    struct rte_eth_link *link)
327 {
328         struct rte_eth_link *dst = &(dev->data->dev_link);
329         struct rte_eth_link *src = link;
330
331         if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
332                                 *(uint64_t *)src) == 0)
333                 return -1;
334
335         return 0;
336 }
337
338 static void
339 mpipe_infos_get(struct rte_eth_dev *dev __rte_unused,
340                 struct rte_eth_dev_info *dev_info)
341 {
342         dev_info->min_rx_bufsize  = 128;
343         dev_info->max_rx_pktlen   = 1518;
344         dev_info->max_tx_queues   = MPIPE_TX_MAX_QUEUES;
345         dev_info->max_rx_queues   = MPIPE_RX_MAX_QUEUES;
346         dev_info->max_mac_addrs   = 1;
347         dev_info->rx_offload_capa = 0;
348         dev_info->tx_offload_capa = 0;
349 }
350
351 static int
352 mpipe_configure(struct rte_eth_dev *dev)
353 {
354         struct mpipe_dev_priv *priv = mpipe_priv(dev);
355
356         if (dev->data->nb_tx_queues > MPIPE_TX_MAX_QUEUES) {
357                 RTE_LOG(ERR, PMD, "%s: Too many tx queues: %d > %d\n",
358                         mpipe_name(priv), dev->data->nb_tx_queues,
359                         MPIPE_TX_MAX_QUEUES);
360                 return -EINVAL;
361         }
362         priv->nb_tx_queues = dev->data->nb_tx_queues;
363
 364         if (dev->data->nb_rx_queues > MPIPE_RX_MAX_QUEUES) {
 365                 RTE_LOG(ERR, PMD, "%s: Too many rx queues: %d > %d\n",
 366                         mpipe_name(priv), dev->data->nb_rx_queues,
 367                         MPIPE_RX_MAX_QUEUES);
                     return -EINVAL;
 368         }
369         priv->nb_rx_queues = dev->data->nb_rx_queues;
370
371         return 0;
372 }
373
374 static inline int
375 mpipe_link_compare(struct rte_eth_link *link1,
376                    struct rte_eth_link *link2)
377 {
378         return (*(uint64_t *)link1 == *(uint64_t *)link2)
379                 ? -1 : 0;
380 }
381
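/*
 * Poll the mPIPE link attributes and publish speed/duplex/status into
 * dev->data->dev_link.  With wait_to_complete set, the state is re-polled
 * every MPIPE_LINK_UPDATE_INTERVAL microseconds, up to
 * MPIPE_LINK_UPDATE_TIMEOUT times, until it differs from the old state.
 */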
382 static int
383 mpipe_link_update(struct rte_eth_dev *dev, int wait_to_complete)
384 {
385         struct mpipe_dev_priv *priv = mpipe_priv(dev);
386         struct rte_eth_link old, new;
387         int64_t state, speed;
388         int count, rc;
389
390         memset(&old, 0, sizeof(old));
391         memset(&new, 0, sizeof(new));
392         mpipe_dev_atomic_read_link_status(dev, &old);
393
394         for (count = 0, rc = 0; count < MPIPE_LINK_UPDATE_TIMEOUT; count++) {
395                 if (!priv->initialized)
396                         break;
397
398                 state = gxio_mpipe_link_get_attr(&priv->link,
399                                                  GXIO_MPIPE_LINK_CURRENT_STATE);
400                 if (state < 0)
401                         break;
402
403                 speed = state & GXIO_MPIPE_LINK_SPEED_MASK;
404
405                 new.link_autoneg = (dev->data->dev_conf.link_speeds &
406                                 ETH_LINK_SPEED_AUTONEG);
407                 if (speed == GXIO_MPIPE_LINK_1G) {
408                         new.link_speed = ETH_SPEED_NUM_1G;
409                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
410                         new.link_status = ETH_LINK_UP;
411                 } else if (speed == GXIO_MPIPE_LINK_10G) {
412                         new.link_speed = ETH_SPEED_NUM_10G;
413                         new.link_duplex = ETH_LINK_FULL_DUPLEX;
414                         new.link_status = ETH_LINK_UP;
415                 }
416
417                 rc = mpipe_link_compare(&old, &new);
418                 if (rc == 0 || !wait_to_complete)
419                         break;
420
421                 rte_delay_us(MPIPE_LINK_UPDATE_INTERVAL);
422         }
423
424         mpipe_dev_atomic_write_link_status(dev, &new);
425         return rc;
426 }
427
428 static int
429 mpipe_set_link(struct rte_eth_dev *dev, int up)
430 {
431         struct mpipe_dev_priv *priv = mpipe_priv(dev);
432         int rc;
433
434         rc = gxio_mpipe_link_set_attr(&priv->link,
435                                       GXIO_MPIPE_LINK_DESIRED_STATE,
436                                       up ? GXIO_MPIPE_LINK_ANYSPEED : 0);
437         if (rc < 0) {
438                 RTE_LOG(ERR, PMD, "%s: Failed to set link %s.\n",
439                         mpipe_name(priv), up ? "up" : "down");
440         } else {
441                 mpipe_link_update(dev, 0);
442         }
443
444         return rc;
445 }
446
447 static int
448 mpipe_set_link_up(struct rte_eth_dev *dev)
449 {
450         return mpipe_set_link(dev, 1);
451 }
452
453 static int
454 mpipe_set_link_down(struct rte_eth_dev *dev)
455 {
456         return mpipe_set_link(dev, 0);
457 }
458
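/*
 * Datapath reference counting: rx/tx burst handlers are bracketed by
 * mpipe_dp_enter()/mpipe_dp_exit(), and mpipe_dp_wait() spins until all
 * in-flight bursts have drained so the control path can quiesce the
 * device before reconfiguring it.
 */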
459 static inline void
460 mpipe_dp_enter(struct mpipe_dev_priv *priv)
461 {
462         __insn_mtspr(SPR_DSTREAM_PF, 0);
463         rte_atomic32_inc(&priv->dp_count);
464 }
465
466 static inline void
467 mpipe_dp_exit(struct mpipe_dev_priv *priv)
468 {
469         rte_atomic32_dec(&priv->dp_count);
470 }
471
472 static inline void
473 mpipe_dp_wait(struct mpipe_dev_priv *priv)
474 {
475         while (rte_atomic32_read(&priv->dp_count) != 0) {
476                 rte_pause();
477         }
478 }
479
480 static inline int
481 mpipe_mbuf_stack_index(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
482 {
483         return (mbuf->port < RTE_MAX_ETHPORTS) ?
484                 mpipe_priv(&rte_eth_devices[mbuf->port])->stack :
485                 priv->stack;
486 }
487
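/*
 * Translate an ingress descriptor back into the rte_mbuf that owns its
 * buffer (the mbuf header sits rx_offset bytes before the packet address
 * reported by the hardware) and fill in the packet length, port and RSS
 * hash.
 */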
488 static inline struct rte_mbuf *
489 mpipe_recv_mbuf(struct mpipe_dev_priv *priv, gxio_mpipe_idesc_t *idesc,
490                 int in_port)
491 {
492         void *va = gxio_mpipe_idesc_get_va(idesc);
493         uint16_t size = gxio_mpipe_idesc_get_xfer_size(idesc);
494         struct rte_mbuf *mbuf = RTE_PTR_SUB(va, priv->rx_offset);
495
496         rte_pktmbuf_reset(mbuf);
497         mbuf->data_off = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
498         mbuf->port     = in_port;
499         mbuf->data_len = size;
500         mbuf->pkt_len  = size;
501         mbuf->hash.rss = gxio_mpipe_idesc_get_flow_hash(idesc);
502
503         PMD_DEBUG_RX("%s: RX mbuf %p, buffer %p, buf_addr %p, size %d\n",
504                      mpipe_name(priv), mbuf, va, mbuf->buf_addr, size);
505
506         return mbuf;
507 }
508
509 static inline void
510 mpipe_recv_push(struct mpipe_dev_priv *priv, struct rte_mbuf *mbuf)
511 {
512         const int offset = RTE_PKTMBUF_HEADROOM + MPIPE_RX_IP_ALIGN;
513         void *buf_addr = RTE_PTR_ADD(mbuf->buf_addr, offset);
514
515         gxio_mpipe_push_buffer(priv->context, priv->stack, buf_addr);
516         PMD_DEBUG_RX("%s: Pushed mbuf %p, buffer %p into stack %d\n",
517                      mpipe_name(priv), mbuf, buf_addr, priv->stack);
518 }
519
520 static inline void
521 mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count)
522 {
523         struct rte_mbuf *mbuf;
524         int i;
525
526         for (i = 0; i < count; i++) {
527                 mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
528                 if (!mbuf)
529                         break;
530                 mpipe_recv_push(priv, mbuf);
531         }
532
533         PMD_DEBUG_RX("%s: Filled %d/%d buffers\n", mpipe_name(priv), i, count);
534 }
535
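/*
 * Drain the hardware buffer stack, converting each raw buffer back into
 * the mbuf that owns it and handing the mbuf back to the mempool.
 */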
536 static inline void
537 mpipe_recv_flush_stack(struct mpipe_dev_priv *priv)
538 {
539         const int offset = priv->rx_offset & ~RTE_MEMPOOL_ALIGN_MASK;
540         uint8_t in_port = priv->port_id;
541         struct rte_mbuf *mbuf;
542         void *va;
543
544         while (1) {
545                 va = gxio_mpipe_pop_buffer(priv->context, priv->stack);
546                 if (!va)
547                         break;
548                 mbuf = RTE_PTR_SUB(va, offset);
549
550                 PMD_DEBUG_RX("%s: Flushing mbuf %p, va %p\n",
551                              mpipe_name(priv), mbuf, va);
552
553                 mbuf->data_off    = (uintptr_t)va - (uintptr_t)mbuf->buf_addr;
554                 mbuf->refcnt      = 1;
555                 mbuf->nb_segs     = 1;
556                 mbuf->port        = in_port;
557                 mbuf->packet_type = 0;
558                 mbuf->data_len    = 0;
559                 mbuf->pkt_len     = 0;
560
561                 __rte_mbuf_raw_free(mbuf);
562         }
563 }
564
565 static void
566 mpipe_register_segment(struct mpipe_dev_priv *priv, const struct rte_memseg *ms)
567 {
568         size_t size = ms->hugepage_sz;
569         uint8_t *addr, *end;
570         int rc = -EINVAL;
571
572         for (addr = ms->addr, end = addr + ms->len; addr < end; addr += size) {
573                 rc = gxio_mpipe_register_page(priv->context, priv->stack, addr,
574                                               size, 0);
575                 if (rc < 0)
576                         break;
577         }
578
579         if (rc < 0) {
580                 RTE_LOG(ERR, PMD, "%s: Could not register memseg @%p, %d.\n",
581                         mpipe_name(priv), ms->addr, rc);
582         } else {
583                 RTE_LOG(DEBUG, PMD, "%s: Registered segment %p - %p\n",
584                         mpipe_name(priv), ms->addr,
585                         RTE_PTR_ADD(ms->addr, ms->len - 1));
586         }
587 }
588
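/*
 * One-time ingress setup for a port: allocate NotifRings, a NotifGroup,
 * buckets and a buffer stack, register the DPDK memory segments with
 * mPIPE, and initialize the buffer stack memory.
 */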
589 static int
590 mpipe_recv_init(struct mpipe_dev_priv *priv)
591 {
592         const struct rte_memseg *seg = rte_eal_get_physmem_layout();
593         size_t stack_size;
594         void *stack_mem;
595         int rc;
596
597         if (!priv->rx_mpool) {
598                 RTE_LOG(ERR, PMD, "%s: No buffer pool.\n",
599                         mpipe_name(priv));
600                 return -ENODEV;
601         }
602
603         /* Allocate one NotifRing for each queue. */
604         rc = gxio_mpipe_alloc_notif_rings(priv->context, MPIPE_RX_MAX_QUEUES,
605                                           0, 0);
606         if (rc < 0) {
607                 RTE_LOG(ERR, PMD, "%s: Failed to allocate notif rings.\n",
608                         mpipe_name(priv));
609                 return rc;
610         }
611         priv->first_ring = rc;
612
613         /* Allocate a NotifGroup. */
614         rc = gxio_mpipe_alloc_notif_groups(priv->context, 1, 0, 0);
615         if (rc < 0) {
616                 RTE_LOG(ERR, PMD, "%s: Failed to allocate rx group.\n",
617                         mpipe_name(priv));
618                 return rc;
619         }
620         priv->notif_group = rc;
621
622         /* Allocate required buckets. */
623         rc = gxio_mpipe_alloc_buckets(priv->context, MPIPE_RX_BUCKETS, 0, 0);
624         if (rc < 0) {
625                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buckets.\n",
626                         mpipe_name(priv));
627                 return rc;
628         }
629         priv->first_bucket = rc;
630
631         rc = gxio_mpipe_alloc_buffer_stacks(priv->context, 1, 0, 0);
632         if (rc < 0) {
633                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer stack.\n",
634                         mpipe_name(priv));
635                 return rc;
636         }
637         priv->stack = rc;
638
639         while (seg && seg->addr)
640                 mpipe_register_segment(priv, seg++);
641
642         stack_size = gxio_mpipe_calc_buffer_stack_bytes(MPIPE_RX_STACK_SIZE);
643         stack_mem = rte_zmalloc(NULL, stack_size, 65536);
644         if (!stack_mem) {
645                 RTE_LOG(ERR, PMD, "%s: Failed to allocate buffer memory.\n",
646                         mpipe_name(priv));
647                 return -ENOMEM;
648         } else {
649                 RTE_LOG(DEBUG, PMD, "%s: Buffer stack memory %p - %p.\n",
650                         mpipe_name(priv), stack_mem,
651                         RTE_PTR_ADD(stack_mem, stack_size - 1));
652         }
653
654         rc = gxio_mpipe_init_buffer_stack(priv->context, priv->stack,
655                                           priv->rx_size_code, stack_mem,
656                                           stack_size, 0);
657         if (rc < 0) {
658                 RTE_LOG(ERR, PMD, "%s: Failed to initialize buffer stack.\n",
659                         mpipe_name(priv));
660                 return rc;
661         }
662
663         return 0;
664 }
665
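/*
 * One-time egress setup for a port: allocate an eDMA ring, pick a
 * supported equeue size for MPIPE_TX_DESCS, allocate the TX completion
 * array, and initialize the equeue backing memory.
 */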
666 static int
667 mpipe_xmit_init(struct mpipe_dev_priv *priv)
668 {
669         size_t ring_size;
670         void *ring_mem;
671         int rc;
672
673         /* Allocate eDMA ring. */
674         rc = gxio_mpipe_alloc_edma_rings(priv->context, 1, 0, 0);
675         if (rc < 0) {
676                 RTE_LOG(ERR, PMD, "%s: Failed to alloc tx ring.\n",
677                         mpipe_name(priv));
678                 return rc;
679         }
680         priv->ering = rc;
681
682         rc = mpipe_equeue_size(MPIPE_TX_DESCS);
683         if (rc < 0) {
684                 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d equeue descs.\n",
685                         mpipe_name(priv), (int)MPIPE_TX_DESCS);
686                 return -ENOMEM;
687         }
688         priv->equeue_size = rc;
689
690         /* Initialize completion array. */
691         ring_size = sizeof(priv->tx_comps[0]) * priv->equeue_size;
692         priv->tx_comps = rte_zmalloc(NULL, ring_size, RTE_CACHE_LINE_SIZE);
693         if (!priv->tx_comps) {
694                 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress comps.\n",
695                         mpipe_name(priv));
696                 return -ENOMEM;
697         }
698
699         /* Allocate eDMA ring memory. */
700         ring_size = sizeof(gxio_mpipe_edesc_t) * priv->equeue_size;
701         ring_mem = rte_zmalloc(NULL, ring_size, ring_size);
702         if (!ring_mem) {
703                 RTE_LOG(ERR, PMD, "%s: Failed to allocate egress descs.\n",
704                         mpipe_name(priv));
705                 return -ENOMEM;
706         } else {
707                 RTE_LOG(DEBUG, PMD, "%s: eDMA ring memory %p - %p.\n",
708                         mpipe_name(priv), ring_mem,
709                         RTE_PTR_ADD(ring_mem, ring_size - 1));
710         }
711
712         /* Initialize eDMA ring. */
713         rc = gxio_mpipe_equeue_init(&priv->equeue, priv->context, priv->ering,
714                                     priv->channel, ring_mem, ring_size, 0);
715         if (rc < 0) {
716                 RTE_LOG(ERR, PMD, "%s: Failed to init equeue\n",
717                         mpipe_name(priv));
718                 return rc;
719         }
720
721         return 0;
722 }
723
724 static int
725 mpipe_link_init(struct mpipe_dev_priv *priv)
726 {
727         int rc;
728
729         /* Open the link. */
730         rc = gxio_mpipe_link_open(&priv->link, priv->context,
731                                   mpipe_name(priv), GXIO_MPIPE_LINK_AUTO_NONE);
732         if (rc < 0) {
733                 RTE_LOG(ERR, PMD, "%s: Failed to open link.\n",
734                         mpipe_name(priv));
735                 return rc;
736         }
737
738         /* Get the channel index. */
739         rc = gxio_mpipe_link_channel(&priv->link);
740         if (rc < 0) {
741                 RTE_LOG(ERR, PMD, "%s: Bad channel\n",
742                         mpipe_name(priv));
743                 return rc;
744         }
745         priv->channel = rc;
746
747         return 0;
748 }
749
750 static int
751 mpipe_init(struct mpipe_dev_priv *priv)
752 {
753         int rc;
754
755         if (priv->initialized)
756                 return 0;
757
758         rc = mpipe_recv_init(priv);
759         if (rc < 0) {
760                 RTE_LOG(ERR, PMD, "%s: Failed to init rx.\n",
761                         mpipe_name(priv));
762                 return rc;
763         }
764
765         rc = mpipe_xmit_init(priv);
766         if (rc < 0) {
767                 RTE_LOG(ERR, PMD, "%s: Failed to init tx.\n",
768                         mpipe_name(priv));
769                 rte_free(priv);
770                 return rc;
771         }
772
773         priv->initialized = 1;
774
775         return 0;
776 }
777
778 static int
779 mpipe_start(struct rte_eth_dev *dev)
780 {
781         struct mpipe_dev_priv *priv = mpipe_priv(dev);
782         struct mpipe_channel_config config;
783         struct mpipe_rx_queue *rx_queue;
784         struct rte_eth_link eth_link;
785         unsigned queue, buffers = 0;
786         size_t ring_size;
787         void *ring_mem;
788         int rc;
789
790         memset(&eth_link, 0, sizeof(eth_link));
791         mpipe_dev_atomic_write_link_status(dev, &eth_link);
792
793         rc = mpipe_init(priv);
794         if (rc < 0)
795                 return rc;
796
797         /* Initialize NotifRings. */
798         for (queue = 0; queue < priv->nb_rx_queues; queue++) {
799                 rx_queue = mpipe_rx_queue(priv, queue);
800                 ring_size = rx_queue->q.nb_desc * sizeof(gxio_mpipe_idesc_t);
801
802                 ring_mem = rte_malloc(NULL, ring_size, ring_size);
803                 if (!ring_mem) {
804                         RTE_LOG(ERR, PMD, "%s: Failed to alloc rx descs.\n",
805                                 mpipe_name(priv));
806                         return -ENOMEM;
807                 } else {
808                         RTE_LOG(DEBUG, PMD, "%s: iDMA ring %d memory %p - %p.\n",
809                                 mpipe_name(priv), queue, ring_mem,
810                                 RTE_PTR_ADD(ring_mem, ring_size - 1));
811                 }
812
813                 rc = gxio_mpipe_iqueue_init(&rx_queue->iqueue, priv->context,
814                                             priv->first_ring + queue, ring_mem,
815                                             ring_size, 0);
816                 if (rc < 0) {
817                         RTE_LOG(ERR, PMD, "%s: Failed to init rx queue.\n",
818                                 mpipe_name(priv));
819                         return rc;
820                 }
821
822                 rx_queue->rx_ring_mem = ring_mem;
823                 buffers += rx_queue->q.nb_desc;
824         }
825
826         /* Initialize ingress NotifGroup and buckets. */
827         rc = gxio_mpipe_init_notif_group_and_buckets(priv->context,
828                         priv->notif_group, priv->first_ring, priv->nb_rx_queues,
829                         priv->first_bucket, MPIPE_RX_BUCKETS,
830                         GXIO_MPIPE_BUCKET_STATIC_FLOW_AFFINITY);
831         if (rc < 0) {
832                 RTE_LOG(ERR, PMD, "%s: Failed to init group and buckets.\n",
833                         mpipe_name(priv));
834                 return rc;
835         }
836
837         /* Configure the classifier to deliver packets from this port. */
838         config.enable = 1;
839         config.first_bucket = priv->first_bucket;
840         config.num_buckets = MPIPE_RX_BUCKETS;
841         memset(&config.stacks, 0xff, sizeof(config.stacks));
842         config.stacks.stacks[priv->rx_size_code] = priv->stack;
843         config.head_room = priv->rx_offset & RTE_MEMPOOL_ALIGN_MASK;
844
845         rc = mpipe_channel_config(priv->instance, priv->channel,
846                                   &config);
847         if (rc < 0) {
848                 RTE_LOG(ERR, PMD, "%s: Failed to setup classifier.\n",
849                         mpipe_name(priv));
850                 return rc;
851         }
852
853         /* Fill empty buffers into the buffer stack. */
854         mpipe_recv_fill_stack(priv, buffers);
855
856         /* Bring up the link. */
857         mpipe_set_link_up(dev);
858
859         /* Start xmit/recv on queues. */
860         for (queue = 0; queue < priv->nb_tx_queues; queue++)
861                 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
862         for (queue = 0; queue < priv->nb_rx_queues; queue++)
863                 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_UP;
864         priv->running = 1;
865
866         return 0;
867 }
868
869 static void
870 mpipe_stop(struct rte_eth_dev *dev)
871 {
872         struct mpipe_dev_priv *priv = mpipe_priv(dev);
873         struct mpipe_channel_config config;
874         unsigned queue;
875         int rc;
876
877         for (queue = 0; queue < priv->nb_tx_queues; queue++)
878                 mpipe_tx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
879         for (queue = 0; queue < priv->nb_rx_queues; queue++)
880                 mpipe_rx_queue(priv, queue)->q.link_status = ETH_LINK_DOWN;
881
882         /* Make sure the link_status writes land. */
883         rte_wmb();
884
885         /*
886          * Wait for link_status change to register with straggling datapath
887          * threads.
888          */
889         mpipe_dp_wait(priv);
890
891         /* Bring down the link. */
892         mpipe_set_link_down(dev);
893
894         /* Remove classifier rules. */
895         memset(&config, 0, sizeof(config));
896         rc = mpipe_channel_config(priv->instance, priv->channel,
897                                   &config);
898         if (rc < 0) {
899                 RTE_LOG(ERR, PMD, "%s: Failed to stop classifier.\n",
900                         mpipe_name(priv));
901         }
902
903         /* Flush completed xmit packets. */
904         mpipe_xmit_flush(priv);
905
906         /* Flush buffer stacks. */
907         mpipe_recv_flush(priv);
908
909         priv->running = 0;
910 }
911
912 static void
913 mpipe_close(struct rte_eth_dev *dev)
914 {
915         struct mpipe_dev_priv *priv = mpipe_priv(dev);
916         if (priv->running)
917                 mpipe_stop(dev);
918 }
919
920 static void
921 mpipe_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
922 {
923         struct mpipe_dev_priv *priv = mpipe_priv(dev);
924         struct mpipe_tx_queue *tx_queue;
925         struct mpipe_rx_queue *rx_queue;
926         unsigned i;
927         uint16_t idx;
928
929         memset(stats, 0, sizeof(*stats));
930
931         for (i = 0; i < priv->nb_tx_queues; i++) {
932                 tx_queue = mpipe_tx_queue(priv, i);
933
934                 stats->opackets += tx_queue->q.stats.packets;
935                 stats->obytes   += tx_queue->q.stats.bytes;
936                 stats->oerrors  += tx_queue->q.stats.errors;
937
938                 idx = tx_queue->q.stat_idx;
939                 if (idx != (uint16_t)-1) {
940                         stats->q_opackets[idx] += tx_queue->q.stats.packets;
941                         stats->q_obytes[idx]   += tx_queue->q.stats.bytes;
942                         stats->q_errors[idx]   += tx_queue->q.stats.errors;
943                 }
944         }
945
946         for (i = 0; i < priv->nb_rx_queues; i++) {
947                 rx_queue = mpipe_rx_queue(priv, i);
948
949                 stats->ipackets  += rx_queue->q.stats.packets;
950                 stats->ibytes    += rx_queue->q.stats.bytes;
951                 stats->ierrors   += rx_queue->q.stats.errors;
952                 stats->rx_nombuf += rx_queue->q.stats.nomem;
953
954                 idx = rx_queue->q.stat_idx;
955                 if (idx != (uint16_t)-1) {
956                         stats->q_ipackets[idx] += rx_queue->q.stats.packets;
957                         stats->q_ibytes[idx]   += rx_queue->q.stats.bytes;
958                         stats->q_errors[idx]   += rx_queue->q.stats.errors;
959                 }
960         }
961 }
962
963 static void
964 mpipe_stats_reset(struct rte_eth_dev *dev)
965 {
966         struct mpipe_dev_priv *priv = mpipe_priv(dev);
967         struct mpipe_tx_queue *tx_queue;
968         struct mpipe_rx_queue *rx_queue;
969         unsigned i;
970
971         for (i = 0; i < priv->nb_tx_queues; i++) {
972                 tx_queue = mpipe_tx_queue(priv, i);
973                 memset(&tx_queue->q.stats, 0, sizeof(tx_queue->q.stats));
974         }
975
976         for (i = 0; i < priv->nb_rx_queues; i++) {
977                 rx_queue = mpipe_rx_queue(priv, i);
978                 memset(&rx_queue->q.stats, 0, sizeof(rx_queue->q.stats));
979         }
980 }
981
982 static int
983 mpipe_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id,
984                               uint8_t stat_idx, uint8_t is_rx)
985 {
986         struct mpipe_dev_priv *priv = mpipe_priv(dev);
987
988         if (is_rx) {
989                 priv->rx_stat_mapping[stat_idx] = queue_id;
990         } else {
991                 priv->tx_stat_mapping[stat_idx] = queue_id;
992         }
993
994         return 0;
995 }
996
997 static int
998 mpipe_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
999                      uint16_t nb_desc, unsigned int socket_id __rte_unused,
1000                      const struct rte_eth_txconf *tx_conf __rte_unused)
1001 {
1002         struct mpipe_tx_queue *tx_queue = dev->data->tx_queues[queue_idx];
1003         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1004         uint16_t idx;
1005
1006         tx_queue = rte_realloc(tx_queue, sizeof(*tx_queue),
1007                                RTE_CACHE_LINE_SIZE);
1008         if (!tx_queue) {
1009                 RTE_LOG(ERR, PMD, "%s: Failed to allocate TX queue.\n",
1010                         mpipe_name(priv));
1011                 return -ENOMEM;
1012         }
1013
1014         memset(&tx_queue->q, 0, sizeof(tx_queue->q));
1015         tx_queue->q.priv = priv;
1016         tx_queue->q.queue_idx = queue_idx;
1017         tx_queue->q.port_id = dev->data->port_id;
1018         tx_queue->q.nb_desc = nb_desc;
1019
1020         tx_queue->q.stat_idx = -1;
1021         for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1022                 if (priv->tx_stat_mapping[idx] == queue_idx)
1023                         tx_queue->q.stat_idx = idx;
1024         }
1025
1026         dev->data->tx_queues[queue_idx] = tx_queue;
1027
1028         return 0;
1029 }
1030
1031 static void
1032 mpipe_tx_queue_release(void *_txq)
1033 {
1034         rte_free(_txq);
1035 }
1036
1037 static int
1038 mpipe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
1039                      uint16_t nb_desc, unsigned int socket_id __rte_unused,
1040                      const struct rte_eth_rxconf *rx_conf __rte_unused,
1041                      struct rte_mempool *mp)
1042 {
1043         struct mpipe_rx_queue *rx_queue = dev->data->rx_queues[queue_idx];
1044         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1045         uint16_t idx;
1046         int size, rc;
1047
1048         rc = mpipe_iqueue_size(nb_desc);
1049         if (rc < 0) {
1050                 RTE_LOG(ERR, PMD, "%s: Cannot allocate %d iqueue descs.\n",
1051                         mpipe_name(priv), (int)nb_desc);
1052                 return -ENOMEM;
1053         }
1054
1055         if (rc != nb_desc) {
1056                 RTE_LOG(WARNING, PMD, "%s: Extending RX descs from %d to %d.\n",
1057                         mpipe_name(priv), (int)nb_desc, rc);
1058                 nb_desc = rc;
1059         }
1060
1061         size = sizeof(*rx_queue);
1062         rx_queue = rte_realloc(rx_queue, size, RTE_CACHE_LINE_SIZE);
1063         if (!rx_queue) {
1064                 RTE_LOG(ERR, PMD, "%s: Failed to allocate RX queue.\n",
1065                         mpipe_name(priv));
1066                 return -ENOMEM;
1067         }
1068
1069         memset(&rx_queue->q, 0, sizeof(rx_queue->q));
1070         rx_queue->q.priv = priv;
1071         rx_queue->q.nb_desc = nb_desc;
1072         rx_queue->q.port_id = dev->data->port_id;
1073         rx_queue->q.queue_idx = queue_idx;
1074
1075         if (!priv->rx_mpool) {
1076                 int size = (rte_pktmbuf_data_room_size(mp) -
1077                             RTE_PKTMBUF_HEADROOM -
1078                             MPIPE_RX_IP_ALIGN);
1079
1080                 priv->rx_offset = (sizeof(struct rte_mbuf) +
1081                                    rte_pktmbuf_priv_size(mp) +
1082                                    RTE_PKTMBUF_HEADROOM +
1083                                    MPIPE_RX_IP_ALIGN);
1084                 if (size < 0) {
1085                         RTE_LOG(ERR, PMD, "%s: Bad buffer size %d.\n",
1086                                 mpipe_name(priv),
1087                                 rte_pktmbuf_data_room_size(mp));
1088                         return -ENOMEM;
1089                 }
1090
1091                 priv->rx_size_code = mpipe_buffer_size_index(size);
1092                 priv->rx_mpool = mp;
1093         }
1094
1095         if (priv->rx_mpool != mp) {
1096                 RTE_LOG(WARNING, PMD, "%s: Ignoring multiple buffer pools.\n",
1097                         mpipe_name(priv));
1098         }
1099
1100         rx_queue->q.stat_idx = -1;
1101         for (idx = 0; idx < RTE_ETHDEV_QUEUE_STAT_CNTRS; idx++) {
1102                 if (priv->rx_stat_mapping[idx] == queue_idx)
1103                         rx_queue->q.stat_idx = idx;
1104         }
1105
1106         dev->data->rx_queues[queue_idx] = rx_queue;
1107
1108         return 0;
1109 }
1110
1111 static void
1112 mpipe_rx_queue_release(void *_rxq)
1113 {
1114         rte_free(_rxq);
1115 }
1116
1117 #define MPIPE_XGBE_ENA_HASH_MULTI       \
1118         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_MULTI_SHIFT)
1119 #define MPIPE_XGBE_ENA_HASH_UNI         \
1120         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__ENA_HASH_UNI_SHIFT)
1121 #define MPIPE_XGBE_COPY_ALL             \
1122         (1UL << MPIPE_XAUI_RECEIVE_CONFIGURATION__COPY_ALL_SHIFT)
1123 #define MPIPE_GBE_ENA_MULTI_HASH        \
1124         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__MULTI_HASH_ENA_SHIFT)
1125 #define MPIPE_GBE_ENA_UNI_HASH          \
1126         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__UNI_HASH_ENA_SHIFT)
1127 #define MPIPE_GBE_COPY_ALL              \
1128         (1UL << MPIPE_GBE_NETWORK_CONFIGURATION__COPY_ALL_SHIFT)
1129
1130 static void
1131 mpipe_promiscuous_enable(struct rte_eth_dev *dev)
1132 {
1133         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1134         int64_t reg;
1135         int addr;
1136
1137         if (priv->is_xaui) {
1138                 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1139                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1140                 reg &= ~MPIPE_XGBE_ENA_HASH_MULTI;
1141                 reg &= ~MPIPE_XGBE_ENA_HASH_UNI;
1142                 reg |=  MPIPE_XGBE_COPY_ALL;
1143                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1144         } else {
1145                 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1146                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1147                 reg &= ~MPIPE_GBE_ENA_MULTI_HASH;
1148                 reg &= ~MPIPE_GBE_ENA_UNI_HASH;
1149                 reg |=  MPIPE_GBE_COPY_ALL;
1150                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1151         }
1152 }
1153
1154 static void
1155 mpipe_promiscuous_disable(struct rte_eth_dev *dev)
1156 {
1157         struct mpipe_dev_priv *priv = mpipe_priv(dev);
1158         int64_t reg;
1159         int addr;
1160
1161         if (priv->is_xaui) {
1162                 addr = MPIPE_XAUI_RECEIVE_CONFIGURATION;
1163                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1164                 reg |=  MPIPE_XGBE_ENA_HASH_MULTI;
1165                 reg |=  MPIPE_XGBE_ENA_HASH_UNI;
1166                 reg &= ~MPIPE_XGBE_COPY_ALL;
1167                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1168         } else {
1169                 addr = MPIPE_GBE_NETWORK_CONFIGURATION;
1170                 reg  = gxio_mpipe_link_mac_rd(&priv->link, addr);
1171                 reg |=  MPIPE_GBE_ENA_MULTI_HASH;
1172                 reg |=  MPIPE_GBE_ENA_UNI_HASH;
1173                 reg &= ~MPIPE_GBE_COPY_ALL;
1174                 gxio_mpipe_link_mac_wr(&priv->link, addr, reg);
1175         }
1176 }
1177
1178 static const struct eth_dev_ops mpipe_dev_ops = {
1179         .dev_infos_get           = mpipe_infos_get,
1180         .dev_configure           = mpipe_configure,
1181         .dev_start               = mpipe_start,
1182         .dev_stop                = mpipe_stop,
1183         .dev_close               = mpipe_close,
1184         .stats_get               = mpipe_stats_get,
1185         .stats_reset             = mpipe_stats_reset,
1186         .queue_stats_mapping_set = mpipe_queue_stats_mapping_set,
1187         .tx_queue_setup          = mpipe_tx_queue_setup,
1188         .rx_queue_setup          = mpipe_rx_queue_setup,
1189         .tx_queue_release        = mpipe_tx_queue_release,
1190         .rx_queue_release        = mpipe_rx_queue_release,
1191         .link_update             = mpipe_link_update,
1192         .dev_set_link_up         = mpipe_set_link_up,
1193         .dev_set_link_down       = mpipe_set_link_down,
1194         .promiscuous_enable      = mpipe_promiscuous_enable,
1195         .promiscuous_disable     = mpipe_promiscuous_disable,
1196 };
1197
1198 static inline void
1199 mpipe_xmit_null(struct mpipe_dev_priv *priv, int64_t start, int64_t end)
1200 {
1201         gxio_mpipe_edesc_t null_desc = { { .bound = 1, .ns = 1 } };
1202         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1203         int64_t slot;
1204
1205         for (slot = start; slot < end; slot++) {
1206                 gxio_mpipe_equeue_put_at(equeue, null_desc, slot);
1207         }
1208 }
1209
1210 static void
1211 mpipe_xmit_flush(struct mpipe_dev_priv *priv)
1212 {
1213         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1214         int64_t slot;
1215
1216         /* Post a dummy descriptor and wait for its return. */
1217         slot = gxio_mpipe_equeue_reserve(equeue, 1);
1218         if (slot < 0) {
1219                 RTE_LOG(ERR, PMD, "%s: Failed to reserve stop slot.\n",
1220                         mpipe_name(priv));
1221                 return;
1222         }
1223
1224         mpipe_xmit_null(priv, slot, slot + 1);
1225
1226         while (!gxio_mpipe_equeue_is_complete(equeue, slot, 1)) {
1227                 rte_pause();
1228         }
1229
1230         for (slot = 0; slot < priv->equeue_size; slot++) {
1231                 if (priv->tx_comps[slot])
1232                         rte_pktmbuf_free_seg(priv->tx_comps[slot]);
1233         }
1234 }
1235
1236 static void
1237 mpipe_recv_flush(struct mpipe_dev_priv *priv)
1238 {
1239         uint8_t in_port = priv->port_id;
1240         struct mpipe_rx_queue *rx_queue;
1241         gxio_mpipe_iqueue_t *iqueue;
1242         gxio_mpipe_idesc_t idesc;
1243         struct rte_mbuf *mbuf;
1244         unsigned queue;
1245
1246         /* Release packets on the buffer stack. */
1247         mpipe_recv_flush_stack(priv);
1248
1249         /* Flush packets sitting in recv queues. */
1250         for (queue = 0; queue < priv->nb_rx_queues; queue++) {
1251                 rx_queue = mpipe_rx_queue(priv, queue);
1252                 iqueue = &rx_queue->iqueue;
1253                 while (gxio_mpipe_iqueue_try_get(iqueue, &idesc) >= 0) {
1254                         /* Skip idesc with the 'buffer error' bit set. */
1255                         if (idesc.be)
1256                                 continue;
1257                         mbuf = mpipe_recv_mbuf(priv, &idesc, in_port);
1258                         rte_pktmbuf_free(mbuf);
1259                 }
1260                 rte_free(rx_queue->rx_ring_mem);
1261         }
1262 }
1263
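/*
 * Datapath transmit: reserve eDMA slots in bursts and emit one descriptor
 * per mbuf segment.  The per-slot completion array holds the mbuf posted
 * at each slot so it can be freed when the slot is reused; buffers owed to
 * the receive stack (push debt) are instead released back to it by the
 * hardware via the hwb flag.
 */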
1264 static inline uint16_t
1265 mpipe_do_xmit(struct mpipe_tx_queue *tx_queue, struct rte_mbuf **tx_pkts,
1266               uint16_t nb_pkts)
1267 {
1268         struct mpipe_dev_priv *priv = tx_queue->q.priv;
1269         gxio_mpipe_equeue_t *equeue = &priv->equeue;
1270         unsigned nb_bytes = 0;
1271         unsigned nb_sent = 0;
1272         int nb_slots, i;
1273         uint8_t port_id;
1274
1275         PMD_DEBUG_TX("Trying to transmit %d packets on %s:%d.\n",
1276                      nb_pkts, mpipe_name(tx_queue->q.priv),
1277                      tx_queue->q.queue_idx);
1278
1279         /* Optimistic assumption that we need exactly one slot per packet. */
1280         nb_slots = RTE_MIN(nb_pkts, MPIPE_TX_DESCS / 2);
1281
1282         do {
1283                 struct rte_mbuf *mbuf = NULL, *pkt = NULL;
1284                 int64_t slot;
1285
1286                 /* Reserve eDMA ring slots. */
1287                 slot = gxio_mpipe_equeue_try_reserve_fast(equeue, nb_slots);
1288                 if (unlikely(slot < 0)) {
1289                         break;
1290                 }
1291
1292                 for (i = 0; i < nb_slots; i++) {
1293                         unsigned idx = (slot + i) & (priv->equeue_size - 1);
1294                         rte_prefetch0(priv->tx_comps[idx]);
1295                 }
1296
1297                 /* Fill up slots with descriptor and completion info. */
1298                 for (i = 0; i < nb_slots; i++) {
1299                         unsigned idx = (slot + i) & (priv->equeue_size - 1);
1300                         gxio_mpipe_edesc_t desc;
1301                         struct rte_mbuf *next;
1302
1303                         /* Starting on a new packet? */
1304                         if (likely(!mbuf)) {
1305                                 int room = nb_slots - i;
1306
1307                                 pkt = mbuf = tx_pkts[nb_sent];
1308
1309                                 /* Bail out if we run out of descs. */
1310                                 if (unlikely(pkt->nb_segs > room))
1311                                         break;
1312
1313                                 nb_sent++;
1314                         }
1315
1316                         /* We have a segment to send. */
1317                         next = mbuf->next;
1318
1319                         if (priv->tx_comps[idx])
1320                                 rte_pktmbuf_free_seg(priv->tx_comps[idx]);
1321
1322                         port_id = (mbuf->port < RTE_MAX_ETHPORTS) ?
1323                                                 mbuf->port : priv->port_id;
1324                         desc = (gxio_mpipe_edesc_t) { {
1325                                 .va        = rte_pktmbuf_mtod(mbuf, uintptr_t),
1326                                 .xfer_size = rte_pktmbuf_data_len(mbuf),
1327                                 .bound     = next ? 0 : 1,
1328                                 .stack_idx = mpipe_mbuf_stack_index(priv, mbuf),
1329                                 .size      = priv->rx_size_code,
1330                         } };
1331                         if (mpipe_local.mbuf_push_debt[port_id] > 0) {
1332                                 mpipe_local.mbuf_push_debt[port_id]--;
1333                                 desc.hwb = 1;
1334                                 priv->tx_comps[idx] = NULL;
1335                         } else
1336                                 priv->tx_comps[idx] = mbuf;
1337
1338                         nb_bytes += mbuf->data_len;
1339                         gxio_mpipe_equeue_put_at(equeue, desc, slot + i);
1340
1341                         PMD_DEBUG_TX("%s:%d: Sending packet %p, len %d\n",
1342                                      mpipe_name(priv),
1343                                      tx_queue->q.queue_idx,
1344                                      rte_pktmbuf_mtod(mbuf, void *),
1345                                      rte_pktmbuf_data_len(mbuf));
1346
1347                         mbuf = next;
1348                 }
1349
1350                 if (unlikely(nb_sent < nb_pkts)) {
1351
1352                         /* Fill remaining slots with null descriptors. */
1353                         mpipe_xmit_null(priv, slot + i, slot + nb_slots);
1354
1355                         /*
1356                          * Calculate exact number of descriptors needed for
1357                          * the next go around.
1358                          */
1359                         nb_slots = 0;
1360                         for (i = nb_sent; i < nb_pkts; i++) {
1361                                 nb_slots += tx_pkts[i]->nb_segs;
1362                         }
1363
1364                         nb_slots = RTE_MIN(nb_slots, MPIPE_TX_DESCS / 2);
1365                 }
1366         } while (nb_sent < nb_pkts);
1367
1368         tx_queue->q.stats.packets += nb_sent;
1369         tx_queue->q.stats.bytes   += nb_bytes;
1370
1371         return nb_sent;
1372 }
1373
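/*
 * Datapath receive: peek a burst of ingress descriptors, replenish the
 * buffer stack (or accrue push debt) for each packet taken, convert each
 * descriptor into an mbuf, then advance and credit the ring back to the
 * hardware.
 */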
1374 static inline uint16_t
1375 mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts,
1376               uint16_t nb_pkts)
1377 {
1378         struct mpipe_dev_priv *priv = rx_queue->q.priv;
	gxio_mpipe_iqueue_t *iqueue = &rx_queue->iqueue;
	gxio_mpipe_idesc_t *first_idesc, *idesc, *last_idesc;
	uint8_t in_port = rx_queue->q.port_id;
	const unsigned look_ahead = 8;
	int room = nb_pkts, rc = 0;
	unsigned nb_packets = 0;
	unsigned nb_dropped = 0;
	unsigned nb_nomem = 0;
	unsigned nb_bytes = 0;
	unsigned nb_descs, i;

	while (room && !rc) {
		if (rx_queue->avail_descs < room) {
			rc = gxio_mpipe_iqueue_try_peek(iqueue,
							&rx_queue->next_desc);
			rx_queue->avail_descs = rc < 0 ? 0 : rc;
		}

		if (unlikely(!rx_queue->avail_descs)) {
			break;
		}

		nb_descs = RTE_MIN(room, rx_queue->avail_descs);

		first_idesc = rx_queue->next_desc;
		last_idesc  = first_idesc + nb_descs;

		rx_queue->next_desc   += nb_descs;
		rx_queue->avail_descs -= nb_descs;

		for (i = 1; i < look_ahead; i++) {
			rte_prefetch0(first_idesc + i);
		}

		PMD_DEBUG_RX("%s:%d: Trying to receive %d packets\n",
			     mpipe_name(rx_queue->q.priv),
			     rx_queue->q.queue_idx,
			     nb_descs);

		for (idesc = first_idesc; idesc < last_idesc; idesc++) {
			struct rte_mbuf *mbuf;

			PMD_DEBUG_RX("%s:%d: processing idesc %d/%d\n",
				     mpipe_name(priv),
				     rx_queue->q.queue_idx,
				     nb_packets, nb_descs);

			rte_prefetch0(idesc + look_ahead);

			PMD_DEBUG_RX("%s:%d: idesc %p, %s%s%s%s%s%s%s%s%s%s"
				     "size: %d, bkt: %d, chan: %d, ring: %d, sqn: %lu, va: %lu\n",
				     mpipe_name(priv),
				     rx_queue->q.queue_idx,
				     idesc,
				     idesc->me ? "me, " : "",
				     idesc->tr ? "tr, " : "",
				     idesc->ce ? "ce, " : "",
				     idesc->ct ? "ct, " : "",
				     idesc->cs ? "cs, " : "",
				     idesc->nr ? "nr, " : "",
				     idesc->sq ? "sq, " : "",
				     idesc->ts ? "ts, " : "",
				     idesc->ps ? "ps, " : "",
				     idesc->be ? "be, " : "",
				     idesc->l2_size,
				     idesc->bucket_id,
				     idesc->channel,
				     idesc->notif_ring,
				     (unsigned long)idesc->packet_sqn,
				     (unsigned long)idesc->va);

			if (unlikely(gxio_mpipe_idesc_has_error(idesc))) {
				nb_dropped++;
				gxio_mpipe_iqueue_drop(iqueue, idesc);
				PMD_DEBUG_RX("%s:%d: Descriptor error\n",
					     mpipe_name(rx_queue->q.priv),
					     rx_queue->q.queue_idx);
				continue;
			}

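			/*
			 * Buffer replenishment is batched: build up per-port
			 * "push debt" until it reaches
			 * MPIPE_BUF_DEBT_THRESHOLD, and only then allocate a
			 * fresh mbuf and push it back onto the hardware
			 * buffer stack.
			 */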
			if (mpipe_local.mbuf_push_debt[in_port] <
					MPIPE_BUF_DEBT_THRESHOLD)
				mpipe_local.mbuf_push_debt[in_port]++;
			else {
				mbuf = rte_mbuf_raw_alloc(priv->rx_mpool);
				if (unlikely(!mbuf)) {
					nb_nomem++;
					gxio_mpipe_iqueue_drop(iqueue, idesc);
					PMD_DEBUG_RX("%s:%d: alloc failure\n",
					     mpipe_name(rx_queue->q.priv),
					     rx_queue->q.queue_idx);
					continue;
				}

				mpipe_recv_push(priv, mbuf);
			}

			/* Get and setup the mbuf for the received packet. */
			mbuf = mpipe_recv_mbuf(priv, idesc, in_port);

			/* Update results and statistics counters. */
			rx_pkts[nb_packets] = mbuf;
			nb_bytes += mbuf->pkt_len;
			nb_packets++;
		}

		/*
		 * We release the ring in bursts, but do not track and release
		 * buckets.  This therefore breaks dynamic flow affinity, but
		 * we always operate in static affinity mode, and so we're OK
		 * with this optimization.
		 */
		gxio_mpipe_iqueue_advance(iqueue, nb_descs);
		gxio_mpipe_credit(iqueue->context, iqueue->ring, -1, nb_descs);

		/*
		 * Go around once more if we haven't yet peeked the queue, and
		 * if we have more room to receive.
		 */
		room = nb_pkts - nb_packets;
	}

	rx_queue->q.stats.packets += nb_packets;
	rx_queue->q.stats.bytes   += nb_bytes;
	rx_queue->q.stats.errors  += nb_dropped;
	rx_queue->q.stats.nomem   += nb_nomem;

	PMD_DEBUG_RX("%s:%d: RX: %d/%d pkts/bytes, %d/%d drops/nomem\n",
		     mpipe_name(rx_queue->q.priv), rx_queue->q.queue_idx,
		     nb_packets, nb_bytes, nb_dropped, nb_nomem);

	return nb_packets;
}

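/*
 * Burst receive handler, registered as the device's rx_pkt_burst callback.
 * Packets are only pulled from the iqueue while the link is up.
 */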
static uint16_t
mpipe_recv_pkts(void *_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mpipe_rx_queue *rx_queue = _rxq;
	uint16_t result = 0;

	if (rx_queue) {
		mpipe_dp_enter(rx_queue->q.priv);
		if (likely(rx_queue->q.link_status))
			result = mpipe_do_recv(rx_queue, rx_pkts, nb_pkts);
		mpipe_dp_exit(rx_queue->q.priv);
	}

	return result;
}

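/*
 * Burst transmit handler, registered as the device's tx_pkt_burst callback.
 * Packets are only queued for transmission while the link is up.
 */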
static uint16_t
mpipe_xmit_pkts(void *_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mpipe_tx_queue *tx_queue = _txq;
	uint16_t result = 0;

	if (tx_queue) {
		mpipe_dp_enter(tx_queue->q.priv);
		if (likely(tx_queue->q.link_status))
			result = mpipe_do_xmit(tx_queue, tx_pkts, nb_pkts);
		mpipe_dp_exit(tx_queue->q.priv);
	}

	return result;
}

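/*
 * Walk the mPIPE link enumeration looking for the named interface and
 * return its MAC address.  Returns -ENODEV if no such link exists.
 */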
static int
mpipe_link_mac(const char *ifname, uint8_t *mac)
{
	int rc, idx;
	char name[GXIO_MPIPE_LINK_NAME_LEN];

	for (idx = 0, rc = 0; !rc; idx++) {
		rc = gxio_mpipe_link_enumerate_mac(idx, name, mac);
		if (!rc && !strncmp(name, ifname, GXIO_MPIPE_LINK_NAME_LEN))
			return 0;
	}
	return -ENODEV;
}

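/*
 * Common probe path for the gbe and xgbe vdev drivers: look up the mPIPE
 * instance for the link, allocate and populate the per-device private data
 * and the ethdev, and bring up the link.
 */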
static int
rte_pmd_mpipe_probe_common(struct rte_vdev_driver *drv, const char *ifname,
		      const char *params __rte_unused)
{
	gxio_mpipe_context_t *context;
	struct rte_eth_dev *eth_dev;
	struct mpipe_dev_priv *priv;
	int instance, rc;
	uint8_t *mac;

	/* Get the mPIPE instance that the device belongs to. */
	instance = gxio_mpipe_link_instance(ifname);
	context = mpipe_context(instance);
	if (!context) {
		RTE_LOG(ERR, PMD, "%s: No device for link.\n", ifname);
		return -ENODEV;
	}

	priv = rte_zmalloc(NULL, sizeof(*priv), 0);
	if (!priv) {
		RTE_LOG(ERR, PMD, "%s: Failed to allocate priv.\n", ifname);
		return -ENOMEM;
	}

	memset(&priv->tx_stat_mapping, 0xff, sizeof(priv->tx_stat_mapping));
	memset(&priv->rx_stat_mapping, 0xff, sizeof(priv->rx_stat_mapping));
	priv->context = context;
	priv->instance = instance;
	priv->is_xaui = (strncmp(ifname, "xgbe", 4) == 0);
	priv->channel = -1;

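	/* Resolve the link's MAC address; fail the probe if the link
	 * cannot be enumerated.
	 */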
	mac = priv->mac_addr.addr_bytes;
	rc = mpipe_link_mac(ifname, mac);
	if (rc < 0) {
		RTE_LOG(ERR, PMD, "%s: Failed to enumerate link.\n", ifname);
		rte_free(priv);
		return -ENODEV;
	}

	eth_dev = rte_eth_dev_allocate(ifname);
	if (!eth_dev) {
		RTE_LOG(ERR, PMD, "%s: Failed to allocate device.\n", ifname);
		rte_free(priv);
		return -ENOMEM;
	}

	RTE_LOG(INFO, PMD, "%s: Initialized mpipe device "
		"(mac %02x:%02x:%02x:%02x:%02x:%02x).\n",
		ifname, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);

	priv->eth_dev = eth_dev;
	priv->port_id = eth_dev->data->port_id;
	eth_dev->data->dev_private = priv;
	eth_dev->data->mac_addrs = &priv->mac_addr;

	eth_dev->data->kdrv = RTE_KDRV_NONE;
	eth_dev->driver = NULL;
	eth_dev->data->drv_name = drv->driver.name;
	eth_dev->data->numa_node = instance;

	eth_dev->dev_ops      = &mpipe_dev_ops;
	eth_dev->rx_pkt_burst = &mpipe_recv_pkts;
	eth_dev->tx_pkt_burst = &mpipe_xmit_pkts;

	rc = mpipe_link_init(priv);
	if (rc < 0) {
		RTE_LOG(ERR, PMD, "%s: Failed to init link.\n",
			mpipe_name(priv));
		return rc;
	}

	return 0;
}

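/*
 * Two virtual device drivers are registered, one for the XAUI (xgbe) links
 * and one for the GbE (gbe) links; both share the common probe routine.
 */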
static int rte_pmd_mpipe_xgbe_probe(const char *ifname, const char *params);
static int rte_pmd_mpipe_gbe_probe(const char *ifname, const char *params);

static struct rte_vdev_driver pmd_mpipe_xgbe_drv = {
	.probe = rte_pmd_mpipe_xgbe_probe,
};

static struct rte_vdev_driver pmd_mpipe_gbe_drv = {
	.probe = rte_pmd_mpipe_gbe_probe,
};

static int
rte_pmd_mpipe_xgbe_probe(const char *ifname, const char *params __rte_unused)
{
	return rte_pmd_mpipe_probe_common(&pmd_mpipe_xgbe_drv, ifname, params);
}

static int
rte_pmd_mpipe_gbe_probe(const char *ifname, const char *params __rte_unused)
{
	return rte_pmd_mpipe_probe_common(&pmd_mpipe_gbe_drv, ifname, params);
}

RTE_PMD_REGISTER_VDEV(net_mpipe_xgbe, pmd_mpipe_xgbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_xgbe, xgbe);
RTE_PMD_REGISTER_VDEV(net_mpipe_gbe, pmd_mpipe_gbe_drv);
RTE_PMD_REGISTER_ALIAS(net_mpipe_gbe, gbe);

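/*
 * Open a gxio context for each available mPIPE instance at load time and
 * record how many instances were successfully initialized.
 */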
static void __attribute__((constructor, used))
mpipe_init_contexts(void)
{
	struct mpipe_context *context;
	int rc, instance;

	for (instance = 0; instance < GXIO_MPIPE_INSTANCE_MAX; instance++) {
		context = &mpipe_contexts[instance];

		rte_spinlock_init(&context->lock);
		rc = gxio_mpipe_init(&context->context, instance);
		if (rc < 0)
			break;
	}

	mpipe_instances = instance;
}