net/mrvl: add Rx/Tx support
drivers/net/mrvl/mrvl_ethdev.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Semihalf. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_vdev.h>

/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * so we undefine it here to keep only one definition.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif

#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

#include "mrvl_ethdev.h"
#include "mrvl_qos.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

#define MRVL_MAC_ADDRS_MAX 1
/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"
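
/*
 * Illustrative invocation (interface and file names below are
 * hypothetical):
 *   testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/tmp/qos.conf ...
 * "iface" may be repeated once per port; "cfg" points to an optional
 * QoS configuration file parsed by mrvl_get_qoscfg().
 */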

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT     (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK      (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
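
/*
 * Cookie layout sketch (assumes a 32-bit pp2_cookie_t; the address below
 * is hypothetical): MRVL_COOKIE_HIGH_ADDR_SHIFT is then 32, so an mbuf
 * at virtual address 0x00007f2a12345678 is stored in hardware as cookie
 * 0x12345678 while cookie_addr_high keeps 0x00007f2a00000000. The
 * pointer is recovered with:
 *   mbuf = (struct rte_mbuf *)(cookie_addr_high | cookie);
 * which is why all mbufs must share the same upper address bits (this
 * is verified in mrvl_fill_bpool()).
 */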

static const char * const valid_args[] = {
        MRVL_IFACE_NAME_ARG,
        MRVL_CFG_ARG,
        NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
        MRVL_MUSDK_BPOOLS_RESERVED,
        MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;

/*
 * To support buffer harvesting based on the loopback port, a shadow
 * queue structure was introduced for buffer bookkeeping.
 *
 * Before a packet is sent, the related buffer information
 * (pp2_buff_inf) is stored in the shadow queue. After the packet is
 * transmitted, the no-longer-used buffer is released back to its
 * original hardware pool, provided it originated from an interface.
 * If it was generated by the application itself, i.e. mbuf->port is
 * 0xff, it is released to the software mempool instead.
 */
struct mrvl_shadow_txq {
        int head;           /* write index - used when sending buffers */
        int tail;           /* read index - used when releasing buffers */
        u16 size;           /* queue occupied size */
        u16 num_to_release; /* number of buffers sent, that can be released */
        struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
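
/*
 * Indexing sketch (assumes MRVL_PP2_TX_SHADOWQ_SIZE is a power of two
 * and MRVL_PP2_TX_SHADOWQ_MASK equals MRVL_PP2_TX_SHADOWQ_SIZE - 1):
 *   sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 * advances the write index and wraps it to 0 past the last entry; the
 * same arithmetic is applied to sq->tail when buffers are released.
 */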

struct mrvl_rxq {
        struct mrvl_priv *priv;
        struct rte_mempool *mp;
        int queue_id;
        int port_id;
};

struct mrvl_txq {
        struct mrvl_priv *priv;
        int queue_id;
        int port_id;
};

/*
 * Every tx queue should have a dedicated shadow tx queue.
 *
 * Ports assigned by DPDK might not start at zero or be contiguous, so
 * as a workaround we define shadow queues for each possible port to
 * make sure we eventually fit somewhere.
 */
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];

/** Number of ports configured. */
int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;

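/**
 * Get the bpool size summed over all configured lcores.
 *
 * @param pp2_id
 *   Packet processor id.
 * @param pool_id
 *   Bpool id.
 *
 * @return
 *   Number of buffers accounted to the given bpool.
 */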
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
        int i;
        int size = 0;

        for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
                size += mrvl_port_bpool_size[pp2_id][pool_id][i];

        return size;
}

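/**
 * Reserve a bit in the bitmap, one position above the highest bit
 * currently set (the reserved low bits guarantee the map is non-zero).
 *
 * Example: with *bitmap == 0x0f and a 32-bit int, __builtin_clz(0x0f)
 * returns 28, so n == 4 and bit 4 gets reserved; a subsequent call on
 * the updated bitmap (0x1f) reserves bit 5.
 *
 * @param bitmap
 *   Pointer to the bitmap.
 * @param max
 *   Number of usable bits.
 *
 * @return
 *   Reserved bit index on success, negative value otherwise.
 */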
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
        int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

        if (n >= max)
                return -1;

        *bitmap |= 1 << n;

        return n;
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
                RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
                        dev->data->dev_conf.rxmode.mq_mode);
                return -EINVAL;
        }

        if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
                RTE_LOG(INFO, PMD,
                        "L2 CRC stripping is always enabled in hw\n");
                dev->data->dev_conf.rxmode.hw_strip_crc = 1;
        }

        if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
                RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.split_hdr_size) {
                RTE_LOG(INFO, PMD, "Split headers not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.enable_scatter) {
                RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.enable_lro) {
                RTE_LOG(INFO, PMD, "LRO not supported\n");
                return -EINVAL;
        }

        ret = mrvl_configure_rxqs(priv, dev->data->port_id,
                                  dev->data->nb_rx_queues);
        if (ret < 0)
                return ret;

        priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
        priv->nb_rx_queues = dev->data->nb_rx_queues;

        return 0;
}

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        ret = pp2_ppio_enable(priv->ppio);
        if (ret)
                return ret;

        dev->data->dev_link.link_status = ETH_LINK_UP;

        return ret;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        ret = pp2_ppio_disable(priv->ppio);
        if (ret)
                return ret;

        dev->data->dev_link.link_status = ETH_LINK_DOWN;

        return ret;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        char match[MRVL_MATCH_LEN];
        int ret;

        snprintf(match, sizeof(match), "ppio-%d:%d",
                 priv->pp_id, priv->ppio_id);
        priv->ppio_params.match = match;

        /*
         * Calculate the maximum bpool size for the refill feature as 1.5
         * times the configured size. If the bpool size exceeds this value,
         * superfluous buffers are removed.
         */
        priv->bpool_max_size = priv->bpool_init_size +
                              (priv->bpool_init_size >> 1);
        /*
         * Calculate the minimum bpool size for the refill feature as
         * two default burst sizes multiplied by the number of rx queues.
         * If the bpool size drops below this value, new buffers are
         * added to the pool.
         */
        priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
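        /*
         * Worked example (figures hypothetical): two rx queues with 1024
         * descriptors each give bpool_init_size == 2048, hence
         * bpool_max_size == 3072, while bpool_min_size == 2 * 64 * 2 == 256.
         */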

        ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
        if (ret)
                return ret;

        /* For default QoS config, don't start classifier. */
        if (mrvl_qos_cfg) {
                ret = mrvl_start_qos_mapping(priv);
                if (ret) {
                        pp2_ppio_deinit(priv->ppio);
                        return ret;
                }
        }

        ret = mrvl_dev_set_link_up(dev);
        if (ret)
                goto out;

        return 0;
out:
        pp2_ppio_deinit(priv->ppio);
        return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
        int i;

        RTE_LOG(INFO, PMD, "Flushing rx queues\n");
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                int ret, num;

                do {
                        struct mrvl_rxq *q = dev->data->rx_queues[i];
                        struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

                        num = MRVL_PP2_RXD_MAX;
                        ret = pp2_ppio_recv(q->priv->ppio,
                                            q->priv->rxq_map[q->queue_id].tc,
                                            q->priv->rxq_map[q->queue_id].inq,
                                            descs, (uint16_t *)&num);
                } while (ret == 0 && num);
        }
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
        int i;

        RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct mrvl_shadow_txq *sq =
                        &shadow_txqs[dev->data->port_id][i];

                while (sq->tail != sq->head) {
                        uint64_t addr = cookie_addr_high |
                                        sq->ent[sq->tail].buff.cookie;
                        rte_pktmbuf_free((struct rte_mbuf *)addr);
                        sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
                }

                memset(sq, 0, sizeof(*sq));
        }
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        uint32_t num;
        int ret;

        ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to get number of bpool buffers\n");
                return;
        }

        while (num--) {
                struct pp2_buff_inf inf;
                uint64_t addr;

                ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
                                         &inf);
                if (ret)
                        break;

                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
        }
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        mrvl_dev_set_link_down(dev);
        mrvl_flush_rx_queues(dev);
        mrvl_flush_tx_shadow_queues(dev);
        if (priv->qos_tbl)
                pp2_cls_qos_tbl_deinit(priv->qos_tbl);
        pp2_ppio_deinit(priv->ppio);
        priv->ppio = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        size_t i;

        for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
                struct pp2_ppio_tc_params *tc_params =
                        &priv->ppio_params.inqs_params.tcs_params[i];

                if (tc_params->inqs_params) {
                        rte_free(tc_params->inqs_params);
                        tc_params->inqs_params = NULL;
                }
        }

        mrvl_flush_bpool(dev);
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
        /*
         * TODO
         * The port stops sending packets if pp2_ppio_set_mac_addr()
         * is called after pp2_ppio_enable(). As a quick fix, enable
         * the port once again.
         */
        pp2_ppio_enable(priv->ppio);
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
static void
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                   struct rte_eth_dev_info *info)
{
        info->max_rx_queues = MRVL_PP2_RXQ_MAX;
        info->max_tx_queues = MRVL_PP2_TXQ_MAX;
        info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

        info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
        info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
        info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

        info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
        info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
        info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

        /* By default packets are dropped if no descriptors are available */
        info->default_rxconf.rx_drop_en = 1;

        info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}

/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                              struct rte_eth_rxq_info *qinfo)
{
        struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
        struct mrvl_priv *priv = dev->data->dev_private;
        int inq = priv->rxq_map[rx_queue_id].inq;
        int tc = priv->rxq_map[rx_queue_id].tc;
        struct pp2_ppio_tc_params *tc_params =
                &priv->ppio_params.inqs_params.tcs_params[tc];

        qinfo->mp = q->mp;
        qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              struct rte_eth_txq_info *qinfo)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        qinfo->nb_desc =
                priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}

/**
 * Allocate buffers from the mempool and release them to the hardware
 * bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to the bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
        struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
        struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
        int i, ret;
        unsigned int core_id = rte_lcore_id();
        struct pp2_hif *hif = hifs[core_id];
        struct pp2_bpool *bpool = rxq->priv->bpool;

        ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
        if (ret)
                return ret;

        if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
                cookie_addr_high =
                        (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

        for (i = 0; i < num; i++) {
                if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
                        != cookie_addr_high) {
                        RTE_LOG(ERR, PMD,
                                "mbuf virtual addr high 0x%lx out of range\n",
                                (uint64_t)mbufs[i] >> 32);
                        goto out;
                }

                entries[i].buff.addr =
                        rte_mbuf_data_dma_addr_default(mbufs[i]);
                entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
                entries[i].bpool = bpool;
        }

        pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
        mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

        if (i != num)
                goto out;

        return 0;
out:
        for (; i < num; i++)
                rte_pktmbuf_free(mbufs[i]);

        return -1;
}

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_rxconf *conf __rte_unused,
                    struct rte_mempool *mp)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_rxq *rxq;
        uint32_t min_size,
                 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        int ret, tc, inq;

        if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
                /*
                 * Unknown TC mapping, so this queue cannot be mapped to
                 * a correct in-queue.
                 */
                RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
                        idx, priv->ppio_id);
                return -EFAULT;
        }

        min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
                   MRVL_PKT_EFFEC_OFFS;
        if (min_size < max_rx_pkt_len) {
                RTE_LOG(ERR, PMD,
                        "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
                        max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
                        MRVL_PKT_EFFEC_OFFS,
                        max_rx_pkt_len);
                return -EINVAL;
        }

        if (dev->data->rx_queues[idx]) {
                rte_free(dev->data->rx_queues[idx]);
                dev->data->rx_queues[idx] = NULL;
        }

        rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
        if (!rxq)
                return -ENOMEM;

        rxq->priv = priv;
        rxq->mp = mp;
        rxq->queue_id = idx;
        rxq->port_id = dev->data->port_id;
        mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

        tc = priv->rxq_map[rxq->queue_id].tc;
        inq = priv->rxq_map[rxq->queue_id].inq;
        priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
                desc;

        ret = mrvl_fill_bpool(rxq, desc);
        if (ret) {
                rte_free(rxq);
                return ret;
        }

        priv->bpool_init_size += desc;

        dev->data->rx_queues[idx] = rxq;

        return 0;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
        struct mrvl_rxq *q = rxq;
        struct pp2_ppio_tc_params *tc_params;
        int i, num, tc, inq;

        if (!q)
                return;

        tc = q->priv->rxq_map[q->queue_id].tc;
        inq = q->priv->rxq_map[q->queue_id].inq;
        tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
        num = tc_params->inqs_params[inq].size;
        for (i = 0; i < num; i++) {
                struct pp2_buff_inf inf;
                uint64_t addr;

                /* stop early once the bpool runs out of buffers */
                if (pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool,
                                       &inf))
                        break;

                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
        }

        rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_txconf *conf __rte_unused)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_txq *txq;

        if (dev->data->tx_queues[idx]) {
                rte_free(dev->data->tx_queues[idx]);
                dev->data->tx_queues[idx] = NULL;
        }

        txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
        if (!txq)
                return -ENOMEM;

        txq->priv = priv;
        txq->queue_id = idx;
        txq->port_id = dev->data->port_id;
        dev->data->tx_queues[idx] = txq;

        priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
        priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

        return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
static void
mrvl_tx_queue_release(void *txq)
{
        struct mrvl_txq *q = txq;

        if (!q)
                return;

        rte_free(q);
}

static const struct eth_dev_ops mrvl_ops = {
        .dev_configure = mrvl_dev_configure,
        .dev_start = mrvl_dev_start,
        .dev_stop = mrvl_dev_stop,
        .dev_set_link_up = mrvl_dev_set_link_up,
        .dev_set_link_down = mrvl_dev_set_link_down,
        .dev_close = mrvl_dev_close,
        .mac_addr_set = mrvl_mac_addr_set,
        .dev_infos_get = mrvl_dev_infos_get,
        .rxq_info_get = mrvl_rxq_info_get,
        .txq_info_get = mrvl_txq_info_get,
        .rx_queue_setup = mrvl_rx_queue_setup,
        .rx_queue_release = mrvl_rx_queue_release,
        .tx_queue_setup = mrvl_tx_queue_setup,
        .tx_queue_release = mrvl_tx_queue_release,
};

/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
        struct mrvl_rxq *q = rxq;
        struct pp2_ppio_desc descs[nb_pkts];
        struct pp2_bpool *bpool;
        int i, ret, rx_done = 0;
        int num;
        unsigned int core_id = rte_lcore_id();

        if (unlikely(!q->priv->ppio))
                return 0;

        bpool = q->priv->bpool;

        ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
                            q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
        if (unlikely(ret < 0)) {
                RTE_LOG(ERR, PMD, "Failed to receive packets\n");
                return 0;
        }
        mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *mbuf;
                enum pp2_inq_desc_status status;
                uint64_t addr;

                if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
                        struct pp2_ppio_desc *pref_desc;
                        u64 pref_addr;

                        pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
                        pref_addr = cookie_addr_high |
                                    pp2_ppio_inq_desc_get_cookie(pref_desc);
                        rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
                        rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
                }

                addr = cookie_addr_high |
                       pp2_ppio_inq_desc_get_cookie(&descs[i]);
                mbuf = (struct rte_mbuf *)addr;
                rte_pktmbuf_reset(mbuf);

                /* drop packet in case of MAC, overrun or resource error */
                status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
                if (unlikely(status != PP2_DESC_ERR_OK)) {
                        struct pp2_buff_inf binf = {
                                .addr = rte_mbuf_data_dma_addr_default(mbuf),
                                .cookie = (pp2_cookie_t)(uint64_t)mbuf,
                        };

                        pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
                        mrvl_port_bpool_size
                                [bpool->pp2_id][bpool->id][core_id]++;
                        continue;
                }

                mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
                mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
                mbuf->data_len = mbuf->pkt_len;
                mbuf->port = q->port_id;

                rx_pkts[rx_done++] = mbuf;
        }

        if (rte_spinlock_trylock(&q->priv->lock) == 1) {
                num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

                if (unlikely(num <= q->priv->bpool_min_size ||
                             (!rx_done && num < q->priv->bpool_init_size))) {
                        ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
                        if (ret)
                                RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
                } else if (unlikely(num > q->priv->bpool_max_size)) {
                        int i;
                        int pkt_to_remove = num - q->priv->bpool_init_size;
                        struct rte_mbuf *mbuf;
                        struct pp2_buff_inf buff;

                        RTE_LOG(DEBUG, PMD,
                                "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
                                bpool->pp2_id, q->priv->ppio->port_id,
                                bpool->id, pkt_to_remove, num,
                                q->priv->bpool_init_size);

                        for (i = 0; i < pkt_to_remove; i++) {
                                pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
                                mbuf = (struct rte_mbuf *)
                                        (cookie_addr_high | buff.cookie);
                                rte_pktmbuf_free(mbuf);
                        }
                        mrvl_port_bpool_size
                                [bpool->pp2_id][bpool->id][core_id] -=
                                                                pkt_to_remove;
                }
                rte_spinlock_unlock(&q->priv->lock);
        }

        return rx_done;
}

/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
                       struct mrvl_shadow_txq *sq, int qid, int force)
{
        struct buff_release_entry *entry;
        uint16_t nb_done = 0, num = 0, skip_bufs = 0;
        int i, core_id = rte_lcore_id();

        pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

        sq->num_to_release += nb_done;

        if (likely(!force &&
                   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
                return;

        nb_done = sq->num_to_release;
        sq->num_to_release = 0;

        for (i = 0; i < nb_done; i++) {
                entry = &sq->ent[sq->tail + num];
                if (unlikely(!entry->buff.addr)) {
                        RTE_LOG(ERR, PMD,
                                "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
                                sq->tail, (u64)entry->buff.cookie,
                                (u64)entry->buff.addr);
                        skip_bufs = 1;
                        goto skip;
                }

                if (unlikely(!entry->bpool)) {
                        struct rte_mbuf *mbuf;

                        mbuf = (struct rte_mbuf *)
                               (cookie_addr_high | entry->buff.cookie);
                        rte_pktmbuf_free(mbuf);
                        skip_bufs = 1;
                        goto skip;
                }

                mrvl_port_bpool_size
                        [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
                num++;
                if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
                        goto skip;
                continue;
skip:
                if (likely(num))
                        pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
                num += skip_bufs;
                sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
                sq->size -= num;
                num = 0;
                /* reset, otherwise a stale value skips a buffer next time */
                skip_bufs = 0;
        }

        if (likely(num)) {
                pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
                sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
                sq->size -= num;
        }
}

/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
        struct mrvl_txq *q = txq;
        struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
        struct pp2_hif *hif = hifs[rte_lcore_id()];
        struct pp2_ppio_desc descs[nb_pkts];
        int i;
        uint16_t num, sq_free_size;

        if (unlikely(!q->priv->ppio))
                return 0;

        if (sq->size)
                mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);

        sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
        if (unlikely(nb_pkts > sq_free_size)) {
                RTE_LOG(DEBUG, PMD,
                        "No room in shadow queue for %d packets! %d packets will be sent.\n",
                        nb_pkts, sq_free_size);
                nb_pkts = sq_free_size;
        }

        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *mbuf = tx_pkts[i];

                if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
                        struct rte_mbuf *pref_pkt_hdr;

                        pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
                        rte_mbuf_prefetch_part1(pref_pkt_hdr);
                        rte_mbuf_prefetch_part2(pref_pkt_hdr);
                }

                sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
                sq->ent[sq->head].buff.addr =
                        rte_mbuf_data_dma_addr_default(mbuf);
                sq->ent[sq->head].bpool =
                        (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
                         NULL : mrvl_port_to_bpool_lookup[mbuf->port];
                sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
                sq->size++;

                pp2_ppio_outq_desc_reset(&descs[i]);
                pp2_ppio_outq_desc_set_phys_addr(&descs[i],
                                                 rte_pktmbuf_mtophys(mbuf));
                pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
                pp2_ppio_outq_desc_set_pkt_len(&descs[i],
                                               rte_pktmbuf_pkt_len(mbuf));
        }

        num = nb_pkts;
        pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
        /* back off the shadow queue by the number of packets not sent */
        if (unlikely(num > nb_pkts)) {
                for (i = nb_pkts; i < num; i++) {
                        sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
                                MRVL_PP2_TX_SHADOWQ_MASK;
                }
                sq->size -= num - nb_pkts;
        }

        return nb_pkts;
}

/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
        struct pp2_init_params init_params;

        memset(&init_params, 0, sizeof(init_params));
        init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
        init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;

        return pp2_init(&init_params);
}

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
        pp2_deinit();
}

/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
        struct pp2_bpool_params bpool_params;
        char match[MRVL_MATCH_LEN];
        struct mrvl_priv *priv;
        int ret, bpool_bit;

        priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
        if (!priv)
                return NULL;

        ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
                                       &priv->pp_id, &priv->ppio_id);
        if (ret)
                goto out_free_priv;

        bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
                                     PP2_BPOOL_NUM_POOLS);
        if (bpool_bit < 0)
                goto out_free_priv;
        priv->bpool_bit = bpool_bit;

        snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
                 priv->bpool_bit);
        memset(&bpool_params, 0, sizeof(bpool_params));
        bpool_params.match = match;
        bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
        ret = pp2_bpool_init(&bpool_params, &priv->bpool);
        if (ret)
                goto out_clear_bpool_bit;

        priv->ppio_params.type = PP2_PPIO_T_NIC;
        rte_spinlock_init(&priv->lock);

        return priv;
out_clear_bpool_bit:
        used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
        rte_free(priv);
        return NULL;
}

/**
 * Create device representing Ethernet port.
 *
 * @param vdev
 *   Pointer to the virtual device.
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct rte_eth_dev *eth_dev;
        struct mrvl_priv *priv;
        struct ifreq req;

        eth_dev = rte_eth_dev_allocate(name);
        if (!eth_dev)
                return -ENOMEM;

        priv = mrvl_priv_create(name);
        if (!priv) {
                ret = -ENOMEM;
                goto out_free_dev;
        }

        eth_dev->data->mac_addrs =
                rte_zmalloc("mac_addrs",
                            ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
        if (!eth_dev->data->mac_addrs) {
                RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
                ret = -ENOMEM;
                goto out_free_priv;
        }

        memset(&req, 0, sizeof(req));
        strcpy(req.ifr_name, name);
        ret = ioctl(fd, SIOCGIFHWADDR, &req);
        if (ret)
                goto out_free_mac;

        memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
               req.ifr_addr.sa_data, ETHER_ADDR_LEN);

        close(fd);

        eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
        eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
        eth_dev->data->dev_private = priv;
        eth_dev->device = &vdev->device;
        eth_dev->dev_ops = &mrvl_ops;

        return 0;
/* error labels unwind in reverse order of resource acquisition */
out_free_mac:
        rte_free(eth_dev->data->mac_addrs);
out_free_priv:
        rte_free(priv);
out_free_dev:
        rte_eth_dev_release_port(eth_dev);
        close(fd);

        return ret;
}

/**
 * Cleanup previously created device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
static void
mrvl_eth_dev_destroy(const char *name)
{
        struct rte_eth_dev *eth_dev;
        struct mrvl_priv *priv;

        eth_dev = rte_eth_dev_allocated(name);
        if (!eth_dev)
                return;

        priv = eth_dev->data->dev_private;
        pp2_bpool_deinit(priv->bpool);
        rte_free(priv);
        rte_free(eth_dev->data->mac_addrs);
        rte_eth_dev_release_port(eth_dev);
}

/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
                 void *extra_args)
{
        const char **ifnames = extra_args;

        ifnames[mrvl_ports_nb++] = value;

        return 0;
}

/**
 * Initialize per-lcore MUSDK hardware interfaces (hifs).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_hifs(void)
{
        struct pp2_hif_params params;
        char match[MRVL_MATCH_LEN];
        int i, ret;

        RTE_LCORE_FOREACH(i) {
                ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
                if (ret < 0)
                        return ret;

                snprintf(match, sizeof(match), "hif-%d", ret);
                memset(&params, 0, sizeof(params));
                params.match = match;
                params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
                ret = pp2_hif_init(&params, &hifs[i]);
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
                        return ret;
                }
        }

        return 0;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
        int i;

        RTE_LCORE_FOREACH(i) {
                if (hifs[i])
                        pp2_hif_deinit(hifs[i]);
        }
}

static void mrvl_set_first_last_cores(int core_id)
{
        if (core_id < mrvl_lcore_first)
                mrvl_lcore_first = core_id;

        if (core_id > mrvl_lcore_last)
                mrvl_lcore_last = core_id;
}

/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
        struct rte_kvargs *kvlist;
        const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
        int ret = -EINVAL;
        uint32_t i, ifnum, cfgnum, core_id;
        const char *params;

        params = rte_vdev_device_args(vdev);
        if (!params)
                return -EINVAL;

        kvlist = rte_kvargs_parse(params, valid_args);
        if (!kvlist)
                return -EINVAL;

        ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
        if (ifnum > RTE_DIM(ifnames))
                goto out_free_kvlist;

        rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
                           mrvl_get_ifnames, &ifnames);

        cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
        if (cfgnum > 1) {
                RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
                goto out_free_kvlist;
        } else if (cfgnum == 1) {
                rte_kvargs_process(kvlist, MRVL_CFG_ARG,
                                   mrvl_get_qoscfg, &mrvl_qos_cfg);
        }

        /*
         * ret == -EEXIST is correct, it means DMA
         * has already been initialized (by another PMD).
         */
        ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
        if (ret < 0 && ret != -EEXIST)
                goto out_free_kvlist;

        ret = mrvl_init_pp2();
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to init PP2!\n");
                goto out_deinit_dma;
        }

        ret = mrvl_init_hifs();
        if (ret)
                goto out_deinit_hifs;

        for (i = 0; i < ifnum; i++) {
                RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
                ret = mrvl_eth_dev_create(vdev, ifnames[i]);
                if (ret)
                        goto out_cleanup;
        }

        rte_kvargs_free(kvlist);

        memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));

        mrvl_lcore_first = RTE_MAX_LCORE;
        mrvl_lcore_last = 0;

        RTE_LCORE_FOREACH(core_id) {
                mrvl_set_first_last_cores(core_id);
        }

        return 0;
out_cleanup:
        for (; i > 0; i--)
                mrvl_eth_dev_destroy(ifnames[i - 1]);
out_deinit_hifs:
        mrvl_deinit_hifs();
        mrvl_deinit_pp2();
out_deinit_dma:
        mv_sys_dma_mem_destroy();
out_free_kvlist:
        rte_kvargs_free(kvlist);

        return ret;
}

/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
        int i;
        const char *name;

        name = rte_vdev_device_name(vdev);
        if (!name)
                return -EINVAL;

        RTE_LOG(INFO, PMD, "Removing %s\n", name);

        for (i = 0; i < rte_eth_dev_count(); i++) {
                char ifname[RTE_ETH_NAME_MAX_LEN];

                rte_eth_dev_get_name_by_port(i, ifname);
                mrvl_eth_dev_destroy(ifname);
        }

        mrvl_deinit_hifs();
        mrvl_deinit_pp2();
        mv_sys_dma_mem_destroy();

        return 0;
}

static struct rte_vdev_driver pmd_mrvl_drv = {
        .probe = rte_pmd_mrvl_probe,
        .remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);