/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Marvell International Ltd.
 *   Copyright(c) 2017 Semihalf.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev_driver.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

/* Unfortunately, container_of is defined by both DPDK and MUSDK, so
 * undefine it here to keep a single definition.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>

#include "mrvl_ethdev.h"
#include "mrvl_qos.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x01
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

/* TCAM has 25 entries reserved for uc/mc filter entries */
#define MRVL_MAC_ADDRS_MAX 25
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT     (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK      (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
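
/*
 * A pp2 cookie is only sizeof(pp2_cookie_t) * 8 bits wide, too narrow for
 * a full 64-bit mbuf virtual address. The PMD stores the (assumed common)
 * upper address bits once in cookie_addr_high and reconstructs the full
 * pointer when a buffer comes back from hardware, e.g. (sketch):
 *
 *   struct rte_mbuf *mbuf =
 *           (struct rte_mbuf *)(cookie_addr_high | inf.cookie);
 *
 * This works only if all mbufs share the same upper address bits, which
 * mrvl_fill_bpool() verifies before handing buffers to the bpool.
 */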

/* Memory size (in bytes) for MUSDK dma buffers */
#define MRVL_MUSDK_DMA_MEMSIZE 41943040

/** Port Rx offload capabilities */
#define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
                          DEV_RX_OFFLOAD_JUMBO_FRAME | \
                          DEV_RX_OFFLOAD_CRC_STRIP | \
                          DEV_RX_OFFLOAD_CHECKSUM)

static const char * const valid_args[] = {
        MRVL_IFACE_NAME_ARG,
        MRVL_CFG_ARG,
        NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
        MRVL_MUSDK_BPOOLS_RESERVED,
        MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;

struct mrvl_ifnames {
        const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
        int idx;
};

/*
 * To support buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer bookkeeping.
 *
 * Before a packet is sent, the related buffer information (pp2_buff_inf)
 * is stored in the shadow queue. After the packet is transmitted, the
 * no-longer-used buffer is released back to its original hardware pool,
 * provided it originated from an interface. If it was generated by the
 * application itself, i.e. the mbuf->port field is 0xff, it is released
 * to the software mempool instead.
 */
struct mrvl_shadow_txq {
        int head;           /* write index - used when sending buffers */
        int tail;           /* read index - used when releasing buffers */
        u16 size;           /* queue occupied size */
        u16 num_to_release; /* number of buffers sent, that can be released */
        struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
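
/*
 * Illustrative ring arithmetic for the shadow queue (a sketch, assuming
 * MRVL_PP2_TX_SHADOWQ_MASK == MRVL_PP2_TX_SHADOWQ_SIZE - 1 with a
 * power-of-two size):
 *
 *   sq->ent[sq->head] = entry;                              // produce
 *   sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 *   ...
 *   entry = sq->ent[sq->tail];                              // consume
 *   sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 */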

struct mrvl_rxq {
        struct mrvl_priv *priv;
        struct rte_mempool *mp;
        int queue_id;
        int port_id;
        int cksum_enabled;
        uint64_t bytes_recv;
        uint64_t drop_mac;
};

struct mrvl_txq {
        struct mrvl_priv *priv;
        int queue_id;
        int port_id;
        uint64_t bytes_sent;
        struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
};

static int mrvl_lcore_first;
static int mrvl_lcore_last;
static int mrvl_dev_num;

static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
                        struct pp2_hif *hif, unsigned int core_id,
                        struct mrvl_shadow_txq *sq, int qid, int force);

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
        int i;
        int size = 0;

        for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
                size += mrvl_port_bpool_size[pp2_id][pool_id][i];

        return size;
}

static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
        int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

        if (n >= max)
                return -1;

        *bitmap |= 1 << n;

        return n;
}
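
/*
 * Worked example: used_hifs starts as MRVL_MUSDK_HIFS_RESERVED (0x0F), so
 * __builtin_clz(0x0F) on a 32-bit int is 28 and the first call returns
 * n = 32 - 28 = 4, marking bit 4 as used (bitmap becomes 0x1F). Subsequent
 * calls return 5, 6, ... until n reaches MRVL_MUSDK_HIFS_MAX and -1 is
 * returned. Note that this relies on *bitmap being non-zero, which the
 * reserved masks guarantee (__builtin_clz(0) is undefined).
 */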

static int
mrvl_init_hif(int core_id)
{
        struct pp2_hif_params params;
        char match[MRVL_MATCH_LEN];
        int ret;

        ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
        if (ret < 0) {
                RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
                return ret;
        }

        snprintf(match, sizeof(match), "hif-%d", ret);
        memset(&params, 0, sizeof(params));
        params.match = match;
        params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
        ret = pp2_hif_init(&params, &hifs[core_id]);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", core_id);
                return ret;
        }

        return 0;
}

static inline struct pp2_hif*
mrvl_get_hif(struct mrvl_priv *priv, int core_id)
{
        int ret;

        if (likely(hifs[core_id] != NULL))
                return hifs[core_id];

        rte_spinlock_lock(&priv->lock);

        ret = mrvl_init_hif(core_id);
        if (ret < 0) {
                RTE_LOG(ERR, PMD, "Failed to allocate hif %d\n", core_id);
                goto out;
        }

        if (core_id < mrvl_lcore_first)
                mrvl_lcore_first = core_id;

        if (core_id > mrvl_lcore_last)
                mrvl_lcore_last = core_id;
out:
        rte_spinlock_unlock(&priv->lock);

        return hifs[core_id];
}

/**
 * Configure RSS based on the DPDK RSS configuration.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
        if (rss_conf->rss_key)
                RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");

        if (rss_conf->rss_hf == 0) {
                priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
        } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
                priv->ppio_params.inqs_params.hash_type =
                        PP2_PPIO_HASH_T_2_TUPLE;
        } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
                priv->ppio_params.inqs_params.hash_type =
                        PP2_PPIO_HASH_T_5_TUPLE;
                priv->rss_hf_tcp = 1;
        } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
                priv->ppio_params.inqs_params.hash_type =
                        PP2_PPIO_HASH_T_5_TUPLE;
                priv->rss_hf_tcp = 0;
        } else {
                return -EINVAL;
        }

        return 0;
}
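
/*
 * Illustrative application-side configuration (a sketch, not part of this
 * PMD): requesting a UDP 5-tuple hash ends up as PP2_PPIO_HASH_T_5_TUPLE
 * with rss_hf_tcp cleared:
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = { .mq_mode = ETH_MQ_RX_RSS },
 *           .rx_adv_conf = {
 *                   .rss_conf = { .rss_hf = ETH_RSS_NONFRAG_IPV4_UDP },
 *           },
 *   };
 *   ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */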

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues and
 * configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
            dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
                RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
                        dev->data->dev_conf.rxmode.mq_mode);
                return -EINVAL;
        }

        if (!(dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
                RTE_LOG(INFO, PMD,
                        "L2 CRC stripping is always enabled in hw\n");
                dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        }

        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
                RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.split_hdr_size) {
                RTE_LOG(INFO, PMD, "Split headers not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) {
                RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
                RTE_LOG(INFO, PMD, "LRO not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
                dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
                                 ETHER_HDR_LEN - ETHER_CRC_LEN;

        ret = mrvl_configure_rxqs(priv, dev->data->port_id,
                                  dev->data->nb_rx_queues);
        if (ret < 0)
                return ret;

        priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
        priv->ppio_params.maintain_stats = 1;
        priv->nb_rx_queues = dev->data->nb_rx_queues;

        if (dev->data->nb_rx_queues == 1 &&
            dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
                RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
                priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;

                return 0;
        }

        return mrvl_configure_rss(priv,
                                  &dev->data->dev_conf.rx_adv_conf.rss_conf);
}
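
/*
 * Example rxmode that passes the checks above (a sketch; offloads outside
 * MRVL_RX_OFFLOADS, e.g. VLAN_STRIP, SCATTER or TCP_LRO, must stay clear):
 *
 *   struct rte_eth_conf conf = {
 *           .rxmode = {
 *                   .mq_mode = ETH_MQ_RX_NONE,
 *                   .offloads = DEV_RX_OFFLOAD_CRC_STRIP |
 *                               DEV_RX_OFFLOAD_CHECKSUM,
 *           },
 *   };
 */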

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        /* extra MV_MH_SIZE bytes are required for Marvell tag */
        uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
        int ret;

        if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
                return -EINVAL;

        if (!priv->ppio)
                return 0;

        ret = pp2_ppio_set_mru(priv->ppio, mru);
        if (ret)
                return ret;

        return pp2_ppio_set_mtu(priv->ppio, mtu);
}
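
/*
 * Worked example (assuming MV_MH_SIZE is 2 bytes): a standard mtu of 1500
 * yields mru = 1500 + 2 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1520
 * bytes, well below MRVL_PKT_SIZE_MAX.
 */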

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return -EPERM;

        ret = pp2_ppio_enable(priv->ppio);
        if (ret)
                return ret;

        /*
         * mtu/mru can be updated only after pp2_ppio_enable() has been
         * called at least once, as pp2_ppio_enable() changes port->t_mode
         * from the default 0 to PP2_TRAFFIC_INGRESS_EGRESS.
         *
         * Set mtu to the default DPDK value here.
         */
        ret = mrvl_mtu_set(dev, dev->data->mtu);
        if (ret)
                pp2_ppio_disable(priv->ppio);

        return ret;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        if (!priv->ppio)
                return -EPERM;

        return pp2_ppio_disable(priv->ppio);
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        char match[MRVL_MATCH_LEN];
        int ret = 0, def_init_size;

        snprintf(match, sizeof(match), "ppio-%d:%d",
                 priv->pp_id, priv->ppio_id);
        priv->ppio_params.match = match;

        /*
         * Calculate the minimum bpool size for the refill feature as
         * follows: two default burst sizes multiplied by the number of
         * rx queues. If the bpool size drops below this value, new
         * buffers will be added to the pool.
         */
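        /*
         * Worked example: with 4 rx queues and MRVL_BURST_SIZE of 64 the
         * refill threshold below is 4 * 64 * 2 = 512 buffers.
         */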
        priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;

        /* In case the initial bpool size configured in queues setup is
         * smaller than the minimum size, add more buffers
         */
        def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
        if (priv->bpool_init_size < def_init_size) {
                int buffs_to_add = def_init_size - priv->bpool_init_size;

                priv->bpool_init_size += buffs_to_add;
                ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
                if (ret)
                        RTE_LOG(ERR, PMD, "Failed to add buffers to bpool\n");
        }

        /*
         * Calculate the maximum bpool size for the refill feature as
         * follows: the maximum number of descriptors in the rx queue
         * multiplied by the number of rx queues, plus the minimum bpool
         * size. In case the bpool size exceeds this value, superfluous
         * buffers will be removed.
         */
        priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
                                priv->bpool_min_size;

        ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to init ppio\n");
                return ret;
        }

        /*
         * In case there are some stale uc/mc mac addresses flush them
         * here. It cannot be done during mrvl_dev_close() as port information
         * is already gone at that point (due to pp2_ppio_deinit() in
         * mrvl_dev_stop()).
         */
        if (!priv->uc_mc_flushed) {
                ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
                if (ret) {
                        RTE_LOG(ERR, PMD,
                                "Failed to flush uc/mc filter list\n");
                        goto out;
                }
                priv->uc_mc_flushed = 1;
        }

        if (!priv->vlan_flushed) {
                ret = pp2_ppio_flush_vlan(priv->ppio);
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
                        /*
                         * TODO
                         * once pp2_ppio_flush_vlan() is supported jump to out
                         * goto out;
                         */
                }
                priv->vlan_flushed = 1;
        }

        /* For default QoS config, don't start classifier. */
        if (mrvl_qos_cfg) {
                ret = mrvl_start_qos_mapping(priv);
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failed to setup QoS mapping\n");
                        goto out;
                }
        }

        ret = mrvl_dev_set_link_up(dev);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to set link up\n");
                goto out;
        }

        return 0;
out:
        RTE_LOG(ERR, PMD, "Failed to start device\n");
        pp2_ppio_deinit(priv->ppio);
        return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
        int i;

        RTE_LOG(INFO, PMD, "Flushing rx queues\n");
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                int ret, num;

                do {
                        struct mrvl_rxq *q = dev->data->rx_queues[i];
                        struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

                        num = MRVL_PP2_RXD_MAX;
                        ret = pp2_ppio_recv(q->priv->ppio,
                                            q->priv->rxq_map[q->queue_id].tc,
                                            q->priv->rxq_map[q->queue_id].inq,
                                            descs, (uint16_t *)&num);
                } while (ret == 0 && num);
        }
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
        int i, j;
        struct mrvl_txq *txq;

        RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = (struct mrvl_txq *)dev->data->tx_queues[i];

                for (j = 0; j < RTE_MAX_LCORE; j++) {
                        struct mrvl_shadow_txq *sq;

                        if (!hifs[j])
                                continue;

                        sq = &txq->shadow_txqs[j];
                        mrvl_free_sent_buffers(txq->priv->ppio,
                                hifs[j], j, sq, txq->queue_id, 1);
                        while (sq->tail != sq->head) {
                                uint64_t addr = cookie_addr_high |
                                        sq->ent[sq->tail].buff.cookie;
                                rte_pktmbuf_free(
                                        (struct rte_mbuf *)addr);
                                sq->tail = (sq->tail + 1) &
                                            MRVL_PP2_TX_SHADOWQ_MASK;
                        }
                        memset(sq, 0, sizeof(*sq));
                }
        }
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct pp2_hif *hif;
        uint32_t num;
        int ret;
        unsigned int core_id = rte_lcore_id();

        if (core_id == LCORE_ID_ANY)
                core_id = 0;

        hif = mrvl_get_hif(priv, core_id);

        ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
                return;
        }

        while (num--) {
                struct pp2_buff_inf inf;
                uint64_t addr;

                ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
                if (ret)
                        break;

                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
        }
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        mrvl_dev_set_link_down(dev);
        mrvl_flush_rx_queues(dev);
        mrvl_flush_tx_shadow_queues(dev);
        if (priv->qos_tbl) {
                pp2_cls_qos_tbl_deinit(priv->qos_tbl);
                priv->qos_tbl = NULL;
        }
        pp2_ppio_deinit(priv->ppio);
        priv->ppio = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        size_t i;

        for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
                struct pp2_ppio_tc_params *tc_params =
                        &priv->ppio_params.inqs_params.tcs_params[i];

                if (tc_params->inqs_params) {
                        rte_free(tc_params->inqs_params);
                        tc_params->inqs_params = NULL;
                }
        }

        mrvl_flush_bpool(dev);
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
        /*
         * TODO
         * once MUSDK provides the necessary API use it here
         */
        struct mrvl_priv *priv = dev->data->dev_private;
        struct ethtool_cmd edata;
        struct ifreq req;
        int ret, fd, link_up;

        if (!priv->ppio)
                return -EPERM;

        edata.cmd = ETHTOOL_GSET;

        strcpy(req.ifr_name, dev->data->name);
        req.ifr_data = (void *)&edata;

        fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd == -1)
                return -EFAULT;

        ret = ioctl(fd, SIOCETHTOOL, &req);
        if (ret == -1) {
                close(fd);
                return -EFAULT;
        }

        close(fd);

        switch (ethtool_cmd_speed(&edata)) {
        case SPEED_10:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case SPEED_100:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case SPEED_1000:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case SPEED_10000:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        default:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
        }

        dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
                                                         ETH_LINK_HALF_DUPLEX;
        dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
                                                           ETH_LINK_FIXED;
        pp2_ppio_get_link_state(priv->ppio, &link_up);
        dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;

        return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_set_promisc(priv->ppio, 1);
        if (ret)
                RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}

/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
        if (ret)
                RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_set_promisc(priv->ppio, 0);
        if (ret)
                RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
        if (ret)
                RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
}

/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
static void
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        char buf[ETHER_ADDR_FMT_SIZE];
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_remove_mac_addr(priv->ppio,
                                       dev->data->mac_addrs[index].addr_bytes);
        if (ret) {
                ether_format_addr(buf, sizeof(buf),
                                  &dev->data->mac_addrs[index]);
                RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
        }
}

/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 * @param vmdq
 *   VMDq pool index to associate address with (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
                  uint32_t index, uint32_t vmdq __rte_unused)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        char buf[ETHER_ADDR_FMT_SIZE];
        int ret;

        if (index == 0)
                /* For setting index 0, mrvl_mac_addr_set() should be used. */
                return -1;

        if (!priv->ppio)
                return 0;

        /*
         * The maximum number of uc addresses can be tuned via the kernel
         * module mvpp2x parameter uc_filter_max. The maximum number of mc
         * addresses is then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently
         * they default to 4 and 21 respectively.
         *
         * If more than uc_filter_max uc addresses are added to the filter
         * list, the NIC switches to promiscuous mode automatically.
         *
         * If more than (MRVL_MAC_ADDRS_MAX - uc_filter_max) mc addresses
         * are added to the filter list, the NIC switches to all-multicast
         * mode automatically.
         */
        ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
        if (ret) {
                ether_format_addr(buf, sizeof(buf), mac_addr);
                RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
                return -1;
        }

        return 0;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int ret;

        if (!priv->ppio)
                return;

        ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
        if (ret) {
                char buf[ETHER_ADDR_FMT_SIZE];

                ether_format_addr(buf, sizeof(buf), mac_addr);
                RTE_LOG(ERR, PMD, "Failed to set mac to %s\n", buf);
        }
}

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct pp2_ppio_statistics ppio_stats;
        uint64_t drop_mac = 0;
        unsigned int i, idx, ret;

        if (!priv->ppio)
                return -EPERM;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct mrvl_rxq *rxq = dev->data->rx_queues[i];
                struct pp2_ppio_inq_statistics rx_stats;

                if (!rxq)
                        continue;

                idx = rxq->queue_id;
                if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
                        RTE_LOG(ERR, PMD,
                                "rx queue %d stats out of range (0 - %d)\n",
                                idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
                        continue;
                }

                ret = pp2_ppio_inq_get_statistics(priv->ppio,
                                                  priv->rxq_map[idx].tc,
                                                  priv->rxq_map[idx].inq,
                                                  &rx_stats, 0);
                if (unlikely(ret)) {
                        RTE_LOG(ERR, PMD,
                                "Failed to update rx queue %d stats\n", idx);
                        break;
                }

                stats->q_ibytes[idx] = rxq->bytes_recv;
                stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
                stats->q_errors[idx] = rx_stats.drop_early +
                                       rx_stats.drop_fullq +
                                       rx_stats.drop_bm +
                                       rxq->drop_mac;
                stats->ibytes += rxq->bytes_recv;
                drop_mac += rxq->drop_mac;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct mrvl_txq *txq = dev->data->tx_queues[i];
                struct pp2_ppio_outq_statistics tx_stats;

                if (!txq)
                        continue;

                idx = txq->queue_id;
                if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
                        RTE_LOG(ERR, PMD,
                                "tx queue %d stats out of range (0 - %d)\n",
                                idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
                        /* skip the queue to avoid writing past the per-queue
                         * stats arrays, mirroring the rx loop above
                         */
                        continue;
                }

                ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
                                                   &tx_stats, 0);
                if (unlikely(ret)) {
                        RTE_LOG(ERR, PMD,
                                "Failed to update tx queue %d stats\n", idx);
                        break;
                }

                stats->q_opackets[idx] = tx_stats.deq_desc;
                stats->q_obytes[idx] = txq->bytes_sent;
                stats->obytes += txq->bytes_sent;
        }

        ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
        if (unlikely(ret)) {
                RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
                return ret;
        }

        stats->ipackets += ppio_stats.rx_packets - drop_mac;
        stats->opackets += ppio_stats.tx_packets;
        stats->imissed += ppio_stats.rx_fullq_dropped +
                          ppio_stats.rx_bm_dropped +
                          ppio_stats.rx_early_dropped +
                          ppio_stats.rx_fifo_dropped +
                          ppio_stats.rx_cls_dropped;
        stats->ierrors = drop_mac;

        return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_stats_reset(struct rte_eth_dev *dev)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        int i;

        if (!priv->ppio)
                return;

        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct mrvl_rxq *rxq = dev->data->rx_queues[i];

                pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
                                            priv->rxq_map[i].inq, NULL, 1);
                rxq->bytes_recv = 0;
                rxq->drop_mac = 0;
        }

        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                struct mrvl_txq *txq = dev->data->tx_queues[i];

                pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
                txq->bytes_sent = 0;
        }

        pp2_ppio_get_statistics(priv->ppio, NULL, 1);
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
static void
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                   struct rte_eth_dev_info *info)
{
        info->speed_capa = ETH_LINK_SPEED_10M |
                           ETH_LINK_SPEED_100M |
                           ETH_LINK_SPEED_1G |
                           ETH_LINK_SPEED_10G;

        info->max_rx_queues = MRVL_PP2_RXQ_MAX;
        info->max_tx_queues = MRVL_PP2_TXQ_MAX;
        info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

        info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
        info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
        info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

        info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
        info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
        info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

        info->rx_offload_capa = MRVL_RX_OFFLOADS;
        info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;

        info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
                                DEV_TX_OFFLOAD_UDP_CKSUM |
                                DEV_TX_OFFLOAD_TCP_CKSUM;

        info->flow_type_rss_offloads = ETH_RSS_IPV4 |
                                       ETH_RSS_NONFRAG_IPV4_TCP |
                                       ETH_RSS_NONFRAG_IPV4_UDP;

        /* By default packets are dropped if no descriptors are available */
        info->default_rxconf.rx_drop_en = 1;
        info->default_rxconf.offloads = DEV_RX_OFFLOAD_CRC_STRIP;

        info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}

/**
 * Return supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 *
 * @return
 *   Const pointer to the table with supported packet types.
 */
static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
        static const uint32_t ptypes[] = {
                RTE_PTYPE_L2_ETHER,
                RTE_PTYPE_L3_IPV4,
                RTE_PTYPE_L3_IPV4_EXT,
                RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
                RTE_PTYPE_L3_IPV6,
                RTE_PTYPE_L3_IPV6_EXT,
                RTE_PTYPE_L2_ETHER_ARP,
                RTE_PTYPE_L4_TCP,
                RTE_PTYPE_L4_UDP
        };

        return ptypes;
}

/**
 * DPDK callback to get information about a specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                              struct rte_eth_rxq_info *qinfo)
{
        struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
        struct mrvl_priv *priv = dev->data->dev_private;
        int inq = priv->rxq_map[rx_queue_id].inq;
        int tc = priv->rxq_map[rx_queue_id].tc;
        struct pp2_ppio_tc_params *tc_params =
                &priv->ppio_params.inqs_params.tcs_params[tc];

        qinfo->mp = q->mp;
        qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about a specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              struct rte_eth_txq_info *qinfo)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        qinfo->nb_desc =
                priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        if (!priv->ppio)
                return -EPERM;

        return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
                    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}

/**
 * Release buffers to hardware bpool (buffer-pool)
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
        struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
        struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
        int i, ret;
        unsigned int core_id;
        struct pp2_hif *hif;
        struct pp2_bpool *bpool;

        core_id = rte_lcore_id();
        if (core_id == LCORE_ID_ANY)
                core_id = 0;

        hif = mrvl_get_hif(rxq->priv, core_id);
        if (!hif)
                return -1;

        bpool = rxq->priv->bpool;

        ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
        if (ret)
                return ret;

        if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
                cookie_addr_high =
                        (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

        for (i = 0; i < num; i++) {
                if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
                        != cookie_addr_high) {
                        RTE_LOG(ERR, PMD,
                                "mbuf virtual addr high 0x%lx out of range\n",
                                (uint64_t)mbufs[i] >> 32);
                        goto out;
                }

                entries[i].buff.addr =
                        rte_mbuf_data_iova_default(mbufs[i]);
                entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
                entries[i].bpool = bpool;
        }

        pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
        mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

        if (i != num)
                goto out;

        return 0;
out:
        for (; i < num; i++)
                rte_pktmbuf_free(mbufs[i]);

        return -1;
}

/**
 * Check whether requested rx queue offloads match port offloads.
 *
 * @param dev
 *   Pointer to the device.
 * @param requested
 *   Bitmap of the requested offloads.
 *
 * @return
 *   1 if requested offloads are okay, 0 otherwise.
 */
static int
mrvl_rx_queue_offloads_okay(struct rte_eth_dev *dev, uint64_t requested)
{
        uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
        uint64_t supported = MRVL_RX_OFFLOADS;
        uint64_t unsupported = requested & ~supported;
        uint64_t missing = mandatory & ~requested;

        if (unsupported) {
                RTE_LOG(ERR, PMD, "Some Rx offloads are not supported. "
                        "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
                        requested, supported);
                return 0;
        }

        if (missing) {
                RTE_LOG(ERR, PMD, "Some Rx offloads are missing. "
                        "Requested 0x%" PRIx64 " missing 0x%" PRIx64 ".\n",
                        requested, missing);
                return 0;
        }

        return 1;
}
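
/*
 * Example: with the port configured for DEV_RX_OFFLOAD_CHECKSUM, a queue
 * requesting only DEV_RX_OFFLOAD_CRC_STRIP fails the "missing" check,
 * while a queue requesting DEV_RX_OFFLOAD_TCP_LRO (absent from
 * MRVL_RX_OFFLOADS) fails the "unsupported" check.
 */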

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters.
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_rxconf *conf,
                    struct rte_mempool *mp)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_rxq *rxq;
        uint32_t min_size,
                 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
        int ret, tc, inq;

        if (!mrvl_rx_queue_offloads_okay(dev, conf->offloads))
                return -ENOTSUP;

        if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
                /*
                 * Unknown TC mapping, mapping will not have a correct queue.
                 */
                RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
                        idx, priv->ppio_id);
                return -EFAULT;
        }

        min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
                   MRVL_PKT_EFFEC_OFFS;
        if (min_size < max_rx_pkt_len) {
                RTE_LOG(ERR, PMD,
                        "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
                        max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
                        MRVL_PKT_EFFEC_OFFS,
                        max_rx_pkt_len);
                return -EINVAL;
        }

        if (dev->data->rx_queues[idx]) {
                rte_free(dev->data->rx_queues[idx]);
                dev->data->rx_queues[idx] = NULL;
        }

        rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
        if (!rxq)
                return -ENOMEM;

        rxq->priv = priv;
        rxq->mp = mp;
        rxq->cksum_enabled =
                dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
        rxq->queue_id = idx;
        rxq->port_id = dev->data->port_id;
        mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

        tc = priv->rxq_map[rxq->queue_id].tc;
        inq = priv->rxq_map[rxq->queue_id].inq;
        priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
                desc;

        ret = mrvl_fill_bpool(rxq, desc);
        if (ret) {
                rte_free(rxq);
                return ret;
        }

        priv->bpool_init_size += desc;

        dev->data->rx_queues[idx] = rxq;

        return 0;
}
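
/*
 * Mempool sizing sketch for the min_size check above (hypothetical
 * numbers): the element data room must cover the headroom plus the
 * effective packet offset in addition to the largest expected frame:
 *
 *   uint16_t data_room = 1518 + RTE_PKTMBUF_HEADROOM +
 *                        MRVL_PKT_EFFEC_OFFS;
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_mp", 4096, 256,
 *           0, data_room, rte_socket_id());
 */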

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
        struct mrvl_rxq *q = rxq;
        struct pp2_ppio_tc_params *tc_params;
        int i, num, tc, inq;
        struct pp2_hif *hif;
        unsigned int core_id = rte_lcore_id();

        if (core_id == LCORE_ID_ANY)
                core_id = 0;

        /* check q before dereferencing it via mrvl_get_hif() */
        if (!q)
                return;

        hif = mrvl_get_hif(q->priv, core_id);
        if (!hif)
                return;

        tc = q->priv->rxq_map[q->queue_id].tc;
        inq = q->priv->rxq_map[q->queue_id].inq;
        tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
        num = tc_params->inqs_params[inq].size;
        for (i = 0; i < num; i++) {
                struct pp2_buff_inf inf;
                uint64_t addr;

                pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
        }

        rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_txconf *conf __rte_unused)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_txq *txq;

        if (dev->data->tx_queues[idx]) {
                rte_free(dev->data->tx_queues[idx]);
                dev->data->tx_queues[idx] = NULL;
        }

        txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
        if (!txq)
                return -ENOMEM;

        txq->priv = priv;
        txq->queue_id = idx;
        txq->port_id = dev->data->port_id;
        dev->data->tx_queues[idx] = txq;

        priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
        priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

        return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
static void
mrvl_tx_queue_release(void *txq)
{
        struct mrvl_txq *q = txq;

        if (!q)
                return;

        rte_free(q);
}

/**
 * Update RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rss_hash_update(struct rte_eth_dev *dev,
                     struct rte_eth_rss_conf *rss_conf)
{
        struct mrvl_priv *priv = dev->data->dev_private;

        return mrvl_configure_rss(priv, rss_conf);
}

/**
 * DPDK callback to get RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
                       struct rte_eth_rss_conf *rss_conf)
{
        struct mrvl_priv *priv = dev->data->dev_private;
        enum pp2_ppio_hash_type hash_type =
                priv->ppio_params.inqs_params.hash_type;

        rss_conf->rss_key = NULL;

        if (hash_type == PP2_PPIO_HASH_T_NONE)
                rss_conf->rss_hf = 0;
        else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
                rss_conf->rss_hf = ETH_RSS_IPV4;
        else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
                rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
        else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
                rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;

        return 0;
}

static const struct eth_dev_ops mrvl_ops = {
        .dev_configure = mrvl_dev_configure,
        .dev_start = mrvl_dev_start,
        .dev_stop = mrvl_dev_stop,
        .dev_set_link_up = mrvl_dev_set_link_up,
        .dev_set_link_down = mrvl_dev_set_link_down,
        .dev_close = mrvl_dev_close,
        .link_update = mrvl_link_update,
        .promiscuous_enable = mrvl_promiscuous_enable,
        .allmulticast_enable = mrvl_allmulticast_enable,
        .promiscuous_disable = mrvl_promiscuous_disable,
        .allmulticast_disable = mrvl_allmulticast_disable,
        .mac_addr_remove = mrvl_mac_addr_remove,
        .mac_addr_add = mrvl_mac_addr_add,
        .mac_addr_set = mrvl_mac_addr_set,
        .mtu_set = mrvl_mtu_set,
        .stats_get = mrvl_stats_get,
        .stats_reset = mrvl_stats_reset,
        .dev_infos_get = mrvl_dev_infos_get,
        .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
        .rxq_info_get = mrvl_rxq_info_get,
        .txq_info_get = mrvl_txq_info_get,
        .vlan_filter_set = mrvl_vlan_filter_set,
        .rx_queue_setup = mrvl_rx_queue_setup,
        .rx_queue_release = mrvl_rx_queue_release,
        .tx_queue_setup = mrvl_tx_queue_setup,
        .tx_queue_release = mrvl_tx_queue_release,
        .rss_hash_update = mrvl_rss_hash_update,
        .rss_hash_conf_get = mrvl_rss_hash_conf_get,
};
1624
1625 /**
1626  * Return packet type information and l3/l4 offsets.
1627  *
1628  * @param desc
1629  *   Pointer to the received packet descriptor.
1630  * @param l3_offset
1631  *   l3 packet offset.
1632  * @param l4_offset
1633  *   l4 packet offset.
1634  *
1635  * @return
1636  *   Packet type information.
1637  */
1638 static inline uint64_t
1639 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
1640                                     uint8_t *l3_offset, uint8_t *l4_offset)
1641 {
1642         enum pp2_inq_l3_type l3_type;
1643         enum pp2_inq_l4_type l4_type;
1644         uint64_t packet_type;
1645
1646         pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
1647         pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
1648
1649         packet_type = RTE_PTYPE_L2_ETHER;
1650
1651         switch (l3_type) {
1652         case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
1653                 packet_type |= RTE_PTYPE_L3_IPV4;
1654                 break;
1655         case PP2_INQ_L3_TYPE_IPV4_OK:
1656                 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
1657                 break;
1658         case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
1659                 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
1660                 break;
1661         case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
1662                 packet_type |= RTE_PTYPE_L3_IPV6;
1663                 break;
1664         case PP2_INQ_L3_TYPE_IPV6_EXT:
1665                 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
1666                 break;
1667         case PP2_INQ_L3_TYPE_ARP:
1668                 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
1669                 /*
1670                  * For ARP packets the hardware reports a wrong l4_offset.
1671                  * Set it to the proper value so that mbuf->l3_len can
1672                  * later be calculated as l4_offset - l3_offset.
1673                  */
1674                 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
1675                 break;
1676         default:
1677                 RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
1678                 break;
1679         }
1680
1681         switch (l4_type) {
1682         case PP2_INQ_L4_TYPE_TCP:
1683                 packet_type |= RTE_PTYPE_L4_TCP;
1684                 break;
1685         case PP2_INQ_L4_TYPE_UDP:
1686                 packet_type |= RTE_PTYPE_L4_UDP;
1687                 break;
1688         default:
1689                 RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
1690                 break;
1691         }
1692
1693         return packet_type;
1694 }
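
     /*
      * Worked example (values assumed for illustration): for a plain
      * IPv4/TCP frame the descriptor reports l3_offset = 14 (Ethernet
      * header) and l4_offset = 34 (14 + 20 byte IPv4 header), so the
      * receive path below derives:
      *
      *     mbuf->l2_len = l3_offset;              // 14
      *     mbuf->l3_len = l4_offset - l3_offset;  // 20
      *
      * The ARP fixup above exists precisely so this subtraction stays valid.
      */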
1695
1696 /**
1697  * Get offload information from the received packet descriptor.
1698  *
1699  * @param desc
1700  *   Pointer to the received packet descriptor.
1701  *
1702  * @return
1703  *   Mbuf offload flags.
1704  */
1705 static inline uint64_t
1706 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
1707 {
1708         uint64_t flags;
1709         enum pp2_inq_desc_status status;
1710
1711         status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
1712         if (unlikely(status != PP2_DESC_ERR_OK))
1713                 flags = PKT_RX_IP_CKSUM_BAD;
1714         else
1715                 flags = PKT_RX_IP_CKSUM_GOOD;
1716
1717         status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
1718         if (unlikely(status != PP2_DESC_ERR_OK))
1719                 flags |= PKT_RX_L4_CKSUM_BAD;
1720         else
1721                 flags |= PKT_RX_L4_CKSUM_GOOD;
1722
1723         return flags;
1724 }
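
     /*
      * Illustrative sketch (not part of this PMD): given a received mbuf m,
      * an application can act on the flags set above, e.g.:
      *
      *     if (m->ol_flags & (PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD))
      *             rte_pktmbuf_free(m);    // drop packets with bad checksums
      */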
1725
1726 /**
1727  * DPDK callback for receive.
1728  *
1729  * @param rxq
1730  *   Generic pointer to the receive queue.
1731  * @param rx_pkts
1732  *   Array to store received packets.
1733  * @param nb_pkts
1734  *   Maximum number of packets in array.
1735  *
1736  * @return
1737  *   Number of packets successfully received.
1738  */
1739 static uint16_t
1740 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1741 {
1742         struct mrvl_rxq *q = rxq;
1743         struct pp2_ppio_desc descs[nb_pkts];
1744         struct pp2_bpool *bpool;
1745         int i, ret, rx_done = 0;
1746         int num;
1747         struct pp2_hif *hif;
1748         unsigned int core_id = rte_lcore_id();
1749
1750         hif = mrvl_get_hif(q->priv, core_id);
1751
1752         if (unlikely(!q->priv->ppio || !hif))
1753                 return 0;
1754
1755         bpool = q->priv->bpool;
1756
1757         ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
1758                             q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
1759         if (unlikely(ret < 0)) {
1760                 RTE_LOG(ERR, PMD, "Failed to receive packets\n");
1761                 return 0;
1762         }
1763         mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
1764
1765         for (i = 0; i < nb_pkts; i++) {
1766                 struct rte_mbuf *mbuf;
1767                 uint8_t l3_offset, l4_offset;
1768                 enum pp2_inq_desc_status status;
1769                 uint64_t addr;
1770
1771                 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
1772                         struct pp2_ppio_desc *pref_desc;
1773                         u64 pref_addr;
1774
1775                         pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
1776                         pref_addr = cookie_addr_high |
1777                                     pp2_ppio_inq_desc_get_cookie(pref_desc);
1778                         rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
1779                         rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
1780                 }
1781
1782                 addr = cookie_addr_high |
1783                        pp2_ppio_inq_desc_get_cookie(&descs[i]);
1784                 mbuf = (struct rte_mbuf *)addr;
1785                 rte_pktmbuf_reset(mbuf);
1786
1787                 /* drop packet in case of mac, overrun or resource error */
1788                 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
1789                 if (unlikely(status != PP2_DESC_ERR_OK)) {
1790                         struct pp2_buff_inf binf = {
1791                                 .addr = rte_mbuf_data_iova_default(mbuf),
1792                                 .cookie = (pp2_cookie_t)(uint64_t)mbuf,
1793                         };
1794
1795                         pp2_bpool_put_buff(hif, bpool, &binf);
1796                         mrvl_port_bpool_size
1797                                 [bpool->pp2_id][bpool->id][core_id]++;
1798                         q->drop_mac++;
1799                         continue;
1800                 }
1801
1802                 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
1803                 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
1804                 mbuf->data_len = mbuf->pkt_len;
1805                 mbuf->port = q->port_id;
1806                 mbuf->packet_type =
1807                         mrvl_desc_to_packet_type_and_offset(&descs[i],
1808                                                             &l3_offset,
1809                                                             &l4_offset);
1810                 mbuf->l2_len = l3_offset;
1811                 mbuf->l3_len = l4_offset - l3_offset;
1812
1813                 if (likely(q->cksum_enabled))
1814                         mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
1815
1816                 rx_pkts[rx_done++] = mbuf;
1817                 q->bytes_recv += mbuf->pkt_len;
1818         }
1819
1820         if (rte_spinlock_trylock(&q->priv->lock) == 1) {
1821                 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
1822
1823                 if (unlikely(num <= q->priv->bpool_min_size ||
1824                              (!rx_done && num < q->priv->bpool_init_size))) {
1825                         ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
1826                         if (ret)
1827                                 RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
1828                 } else if (unlikely(num > q->priv->bpool_max_size)) {
1829                         int i;
1830                         int pkt_to_remove = num - q->priv->bpool_init_size;
1831                         struct rte_mbuf *mbuf;
1832                         struct pp2_buff_inf buff;
1833
1834                         RTE_LOG(DEBUG, PMD,
1835                                 "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
1836                                 bpool->pp2_id, q->priv->ppio->port_id,
1837                                 bpool->id, pkt_to_remove, num,
1838                                 q->priv->bpool_init_size);
1839
1840                         for (i = 0; i < pkt_to_remove; i++) {
1841                                 ret = pp2_bpool_get_buff(hif, bpool, &buff);
1842                                 if (ret)
1843                                         break;
1844                                 mbuf = (struct rte_mbuf *)
1845                                         (cookie_addr_high | buff.cookie);
1846                                 rte_pktmbuf_free(mbuf);
1847                         }
1848                         mrvl_port_bpool_size
1849                                 [bpool->pp2_id][bpool->id][core_id] -= i;
1850                 }
1851                 rte_spinlock_unlock(&q->priv->lock);
1852         }
1853
1854         return rx_done;
1855 }
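
     /*
      * Illustrative usage sketch (not part of this PMD): applications do not
      * call mrvl_rx_pkt_burst() directly; it is reached through the generic
      * API. The port/queue ids and process_packet() are assumptions made for
      * the example.
      *
      *     struct rte_mbuf *pkts[MRVL_BURST_SIZE];
      *     uint16_t i, nb_rx;
      *
      *     nb_rx = rte_eth_rx_burst(0, 0, pkts, MRVL_BURST_SIZE);
      *     for (i = 0; i < nb_rx; i++)
      *             process_packet(pkts[i]);        // hypothetical helper
      */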
1856
1857 /**
1858  * Prepare offload information.
1859  *
1860  * @param ol_flags
1861  *   Offload flags.
1862  * @param packet_type
1863  *   Packet type bitfield.
1864  * @param l3_type
1865  *   Pointer to the pp2_outq_l3_type enum.
1866  * @param l4_type
1867  *   Pointer to the pp2_outq_l4_type enum.
1868  * @param gen_l3_cksum
1869  *   Will be set to 1 in case l3 checksum is computed.
1870  * @param gen_l4_cksum
1871  *   Will be set to 1 in case l4 checksum is computed.
1872  *
1873  * @return
1874  *   0 on success, negative error value otherwise.
1875  */
1876 static inline int
1877 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
1878                         enum pp2_outq_l3_type *l3_type,
1879                         enum pp2_outq_l4_type *l4_type,
1880                         int *gen_l3_cksum,
1881                         int *gen_l4_cksum)
1882 {
1883         /*
1884          * Based on ol_flags prepare the information for
1885          * pp2_ppio_outq_desc_set_proto_info(), which sets up the
1886          * descriptor for offloading.
1887          */
1888         if (ol_flags & PKT_TX_IPV4) {
1889                 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
1890                 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
1891         } else if (ol_flags & PKT_TX_IPV6) {
1892                 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
1893                 /* no checksum for ipv6 header */
1894                 *gen_l3_cksum = 0;
1895         } else {
1896                 /* anything else is unsupported, stop processing */
1897                 return -1;
1898         }
1899
1900         ol_flags &= PKT_TX_L4_MASK;
1901         if ((packet_type & RTE_PTYPE_L4_TCP) &&
1902             ol_flags == PKT_TX_TCP_CKSUM) {
1903                 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
1904                 *gen_l4_cksum = 1;
1905         } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
1906                    ol_flags == PKT_TX_UDP_CKSUM) {
1907                 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
1908                 *gen_l4_cksum = 1;
1909         } else {
1910                 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
1911                 /* no checksum for other type */
1912                 *gen_l4_cksum = 0;
1913         }
1914
1915         return 0;
1916 }
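
     /*
      * Illustrative sketch (not part of this PMD): for the IPv4/TCP branches
      * above to request both checksums, a transmitting application would
      * prepare its mbuf m along these lines (header sizes assumed minimal):
      *
      *     m->packet_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
      *                      RTE_PTYPE_L4_TCP;
      *     m->l2_len = sizeof(struct ether_hdr);
      *     m->l3_len = sizeof(struct ipv4_hdr);
      *     m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
      */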
1917
1918 /**
1919  * Release already sent buffers to bpool (buffer-pool).
1920  *
1921  * @param ppio
1922  *   Pointer to the port structure.
1923  * @param hif
1924  *   Pointer to the MUSDK hardware interface.
      * @param core_id
      *   Id of the lcore that owns the shadow queue.
1925  * @param sq
1926  *   Pointer to the shadow queue.
1927  * @param qid
1928  *   Queue id number.
1929  * @param force
1930  *   Force releasing packets.
1931  */
1932 static inline void
1933 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
1934                        unsigned int core_id, struct mrvl_shadow_txq *sq,
1935                        int qid, int force)
1936 {
1937         struct buff_release_entry *entry;
1938         uint16_t nb_done = 0, num = 0, skip_bufs = 0;
1939         int i;
1940
1941         pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
1942
1943         sq->num_to_release += nb_done;
1944
1945         if (likely(!force &&
1946                    sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
1947                 return;
1948
1949         nb_done = sq->num_to_release;
1950         sq->num_to_release = 0;
1951
1952         for (i = 0; i < nb_done; i++) {
1953                 entry = &sq->ent[sq->tail + num];
1954                 if (unlikely(!entry->buff.addr)) {
1955                         RTE_LOG(ERR, PMD,
1956                                 "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
1957                                 sq->tail, (u64)entry->buff.cookie,
1958                                 (u64)entry->buff.addr);
1959                         skip_bufs = 1;
1960                         goto skip;
1961                 }
1962
1963                 if (unlikely(!entry->bpool)) {
1964                         struct rte_mbuf *mbuf;
1965
1966                         mbuf = (struct rte_mbuf *)
1967                                (cookie_addr_high | entry->buff.cookie);
1968                         rte_pktmbuf_free(mbuf);
1969                         skip_bufs = 1;
1970                         goto skip;
1971                 }
1972
1973                 mrvl_port_bpool_size
1974                         [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
1975                 num++;
1976                 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
1977                         goto skip;
1978                 continue;
1979 skip:
1980                 if (likely(num))
1981                         pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
1982                 num += skip_bufs;
1983                 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
1984                 sq->size -= num;
1985                 num = 0;
1986                 skip_bufs = 0;
1987         }
1988
1989         if (likely(num)) {
1990                 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
1991                 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
1992                 sq->size -= num;
1993         }
1994 }
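
     /*
      * Worked example of the wrap-around arithmetic above, assuming
      * MRVL_PP2_TX_SHADOWQ_MASK == MRVL_PP2_TX_SHADOWQ_SIZE - 1 and a
      * power-of-two size of 512 (values picked for illustration):
      *
      *     tail = (510 + 4) & 511;         // 514 & 511 == 2, wrapped
      */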
1995
1996 /**
1997  * DPDK callback for transmit.
1998  *
1999  * @param txq
2000  *   Generic pointer to the transmit queue.
2001  * @param tx_pkts
2002  *   Packets to transmit.
2003  * @param nb_pkts
2004  *   Number of packets in array.
2005  *
2006  * @return
2007  *   Number of packets successfully transmitted.
2008  */
2009 static uint16_t
2010 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2011 {
2012         struct mrvl_txq *q = txq;
2013         struct mrvl_shadow_txq *sq;
2014         struct pp2_hif *hif;
2015         struct pp2_ppio_desc descs[nb_pkts];
2016         unsigned int core_id = rte_lcore_id();
2017         int i, ret, bytes_sent = 0;
2018         uint16_t num, sq_free_size;
2019         uint64_t addr;
2020
2021         hif = mrvl_get_hif(q->priv, core_id);
2022         sq = &q->shadow_txqs[core_id];
2023
2024         if (unlikely(!q->priv->ppio || !hif))
2025                 return 0;
2026
2027         if (sq->size)
2028                 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2029                                        sq, q->queue_id, 0);
2030
2031         sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2032         if (unlikely(nb_pkts > sq_free_size)) {
2033                 RTE_LOG(DEBUG, PMD,
2034                         "No room in shadow queue for %d packets! %d packets will be sent.\n",
2035                         nb_pkts, sq_free_size);
2036                 nb_pkts = sq_free_size;
2037         }
2038
2039         for (i = 0; i < nb_pkts; i++) {
2040                 struct rte_mbuf *mbuf = tx_pkts[i];
2041                 int gen_l3_cksum, gen_l4_cksum;
2042                 enum pp2_outq_l3_type l3_type;
2043                 enum pp2_outq_l4_type l4_type;
2044
2045                 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2046                         struct rte_mbuf *pref_pkt_hdr;
2047
2048                         pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2049                         rte_mbuf_prefetch_part1(pref_pkt_hdr);
2050                         rte_mbuf_prefetch_part2(pref_pkt_hdr);
2051                 }
2052
2053                 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2054                 sq->ent[sq->head].buff.addr =
2055                         rte_mbuf_data_iova_default(mbuf);
2056                 sq->ent[sq->head].bpool =
2057                         (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
2058                          NULL : mrvl_port_to_bpool_lookup[mbuf->port];
2059                 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2060                 sq->size++;
2061
2062                 pp2_ppio_outq_desc_reset(&descs[i]);
2063                 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2064                                                  rte_pktmbuf_iova(mbuf));
2065                 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2066                 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2067                                                rte_pktmbuf_pkt_len(mbuf));
2068
2069                 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2070                 /*
2071                  * In case unsupported ol_flags were passed,
2072                  * do not update the descriptor offload information.
2073                  */
2074                 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2075                                               &l3_type, &l4_type, &gen_l3_cksum,
2076                                               &gen_l4_cksum);
2077                 if (unlikely(ret))
2078                         continue;
2079
2080                 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2081                                                   mbuf->l2_len,
2082                                                   mbuf->l2_len + mbuf->l3_len,
2083                                                   gen_l3_cksum, gen_l4_cksum);
2084         }
2085
2086         num = nb_pkts;
2087         pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2088         /* roll back shadow queue entries for packets that were not sent */
2089         if (unlikely(num > nb_pkts)) {
2090                 for (i = nb_pkts; i < num; i++) {
2091                         sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2092                                 MRVL_PP2_TX_SHADOWQ_MASK;
2093                         addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2094                         bytes_sent -=
2095                                 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2096                 }
2097                 sq->size -= num - nb_pkts;
2098         }
2099
2100         q->bytes_sent += bytes_sent;
2101
2102         return nb_pkts;
2103 }
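
     /*
      * Illustrative usage sketch (not part of this PMD): mrvl_tx_pkt_burst()
      * may send fewer packets than requested (shadow queue space, hardware
      * queue occupancy), so callers reached through the generic API usually
      * retry. The port/queue ids, pkts and nb_pkts are assumptions made for
      * the example.
      *
      *     uint16_t sent = 0;
      *
      *     while (sent < nb_pkts)
      *             sent += rte_eth_tx_burst(0, 0, pkts + sent,
      *                                      nb_pkts - sent);
      */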
2104
2105 /**
2106  * Initialize packet processor.
2107  *
2108  * @return
2109  *   0 on success, negative error value otherwise.
2110  */
2111 static int
2112 mrvl_init_pp2(void)
2113 {
2114         struct pp2_init_params init_params;
2115
2116         memset(&init_params, 0, sizeof(init_params));
2117         init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2118         init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2119         init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2120
2121         return pp2_init(&init_params);
2122 }
2123
2124 /**
2125  * Deinitialize packet processor.
2126  */
2130 static void
2131 mrvl_deinit_pp2(void)
2132 {
2133         pp2_deinit();
2134 }
2135
2136 /**
2137  * Create private device structure.
2138  *
2139  * @param dev_name
2140  *   Pointer to the port name passed in the initialization parameters.
2141  *
2142  * @return
2143  *   Pointer to the newly allocated private device structure.
2144  */
2145 static struct mrvl_priv *
2146 mrvl_priv_create(const char *dev_name)
2147 {
2148         struct pp2_bpool_params bpool_params;
2149         char match[MRVL_MATCH_LEN];
2150         struct mrvl_priv *priv;
2151         int ret, bpool_bit;
2152
2153         priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2154         if (!priv)
2155                 return NULL;
2156
2157         ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2158                                        &priv->pp_id, &priv->ppio_id);
2159         if (ret)
2160                 goto out_free_priv;
2161
2162         bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2163                                      PP2_BPOOL_NUM_POOLS);
2164         if (bpool_bit < 0)
2165                 goto out_free_priv;
2166         priv->bpool_bit = bpool_bit;
2167
2168         snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2169                  priv->bpool_bit);
2170         memset(&bpool_params, 0, sizeof(bpool_params));
2171         bpool_params.match = match;
2172         bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2173         ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2174         if (ret)
2175                 goto out_clear_bpool_bit;
2176
2177         priv->ppio_params.type = PP2_PPIO_T_NIC;
2178         rte_spinlock_init(&priv->lock);
2179
2180         return priv;
2181 out_clear_bpool_bit:
2182         used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2183 out_free_priv:
2184         rte_free(priv);
2185         return NULL;
2186 }
2187
2188 /**
2189  * Create device representing Ethernet port.
2190  *
      * @param vdev
      *   Pointer to the virtual device the port belongs to.
2191  * @param name
2192  *   Pointer to the port's name.
2193  *
2194  * @return
2195  *   0 on success, negative error value otherwise.
2196  */
2197 static int
2198 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2199 {
2200         int ret, fd;
2201         struct rte_eth_dev *eth_dev;
2202         struct mrvl_priv *priv;
2203         struct ifreq req;
2204
2205         eth_dev = rte_eth_dev_allocate(name);
2206         if (!eth_dev)
2207                 return -ENOMEM;
2208
2209         priv = mrvl_priv_create(name);
2210         if (!priv) {
2211                 ret = -ENOMEM;
2212                 goto out_free_dev;
2213         }
2214
2215         eth_dev->data->mac_addrs =
2216                 rte_zmalloc("mac_addrs",
2217                             ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2218         if (!eth_dev->data->mac_addrs) {
2219                 RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
2220                 ret = -ENOMEM;
2221                 goto out_free_priv;
2222         }
2223
             /* helper socket used only for the SIOCGIFHWADDR ioctl below */
             fd = socket(AF_INET, SOCK_DGRAM, 0);
             if (fd < 0) {
                     ret = -ENODEV;
                     goto out_free_mac;
             }

2224         memset(&req, 0, sizeof(req));
             snprintf(req.ifr_name, sizeof(req.ifr_name), "%s", name);
2226         ret = ioctl(fd, SIOCGIFHWADDR, &req);
             close(fd);
2227         if (ret)
2228                 goto out_free_mac;
2229
2230         memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2231                req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2232
2233         eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2234         eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2235         eth_dev->data->kdrv = RTE_KDRV_NONE;
2236         eth_dev->data->dev_private = priv;
2237         eth_dev->device = &vdev->device;
2238         eth_dev->dev_ops = &mrvl_ops;
2239
2240         return 0;
     /* labels ordered so each error path falls through the remaining cleanups */
2241 out_free_mac:
2242         rte_free(eth_dev->data->mac_addrs);
2243 out_free_priv:
2244         rte_free(priv);
2245 out_free_dev:
2246         rte_eth_dev_release_port(eth_dev);
2247
2248         return ret;
2249 }
2250
2251 /**
2252  * Clean up a previously created device representing an Ethernet port.
2253  *
2254  * @param name
2255  *   Pointer to the port name.
2256  */
2257 static void
2258 mrvl_eth_dev_destroy(const char *name)
2259 {
2260         struct rte_eth_dev *eth_dev;
2261         struct mrvl_priv *priv;
2262
2263         eth_dev = rte_eth_dev_allocated(name);
2264         if (!eth_dev)
2265                 return;
2266
2267         priv = eth_dev->data->dev_private;
2268         pp2_bpool_deinit(priv->bpool);
2269         used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2270         rte_free(priv);
2271         rte_free(eth_dev->data->mac_addrs);
2272         rte_eth_dev_release_port(eth_dev);
2273 }
2274
2275 /**
2276  * Callback used by rte_kvargs_process() during argument parsing.
2277  *
2278  * @param key
2279  *   Pointer to the parsed key (unused).
2280  * @param value
2281  *   Pointer to the parsed value.
2282  * @param extra_args
2283  *   Pointer to the extra arguments, which hold the address of the
2284  *   table of pointers to the parsed interface names.
2285  *
2286  * @return
2287  *   Always 0.
2288  */
2289 static int
2290 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2291                  void *extra_args)
2292 {
2293         struct mrvl_ifnames *ifnames = extra_args;
2294
2295         ifnames->names[ifnames->idx++] = value;
2296
2297         return 0;
2298 }
2299
2300 /**
2301  * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2302  */
2303 static void
2304 mrvl_deinit_hifs(void)
2305 {
2306         int i;
2307
2308         for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2309                 if (hifs[i])
2310                         pp2_hif_deinit(hifs[i]);
2311         }
2312         used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2313         memset(hifs, 0, sizeof(hifs));
2314 }
2315
2316 /**
2317  * DPDK callback to register the virtual device.
2318  *
2319  * @param vdev
2320  *   Pointer to the virtual device.
2321  *
2322  * @return
2323  *   0 on success, negative error value otherwise.
2324  */
2325 static int
2326 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2327 {
2328         struct rte_kvargs *kvlist;
2329         struct mrvl_ifnames ifnames;
2330         int ret = -EINVAL;
2331         uint32_t i, ifnum, cfgnum;
2332         const char *params;
2333
2334         params = rte_vdev_device_args(vdev);
2335         if (!params)
2336                 return -EINVAL;
2337
2338         kvlist = rte_kvargs_parse(params, valid_args);
2339         if (!kvlist)
2340                 return -EINVAL;
2341
2342         ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2343         if (ifnum > RTE_DIM(ifnames.names))
2344                 goto out_free_kvlist;
2345
2346         ifnames.idx = 0;
2347         rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2348                            mrvl_get_ifnames, &ifnames);
2349
2351         /*
2352          * The system initialization below must be done only once,
2353          * when the first configuration file is provided.
2354          */
2355         if (!mrvl_qos_cfg) {
2356                 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2358                 if (cfgnum > 1) {
2359                         RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
2360                         goto out_free_kvlist;
2361                 } else if (cfgnum == 1) {
                             RTE_LOG(INFO, PMD, "Parsing config file!\n");
2362                         rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2363                                            mrvl_get_qoscfg, &mrvl_qos_cfg);
2364                 }
2365         }
2366
2367         if (mrvl_dev_num)
2368                 goto init_devices;
2369
2370         RTE_LOG(INFO, PMD, "Perform MUSDK initializations\n");
2371         /*
2372          * ret == -EEXIST is not an error here: it means DMA memory
2373          * has already been initialized by another PMD.
2374          */
2375         ret = mv_sys_dma_mem_init(MRVL_MUSDK_DMA_MEMSIZE);
2376         if (ret < 0) {
2377                 if (ret != -EEXIST)
2378                         goto out_free_kvlist;
2379                 else
2380                         RTE_LOG(INFO, PMD,
2381                                 "DMA memory has already been initialized by a different driver.\n");
2382         }
2383
2384         ret = mrvl_init_pp2();
2385         if (ret) {
2386                 RTE_LOG(ERR, PMD, "Failed to init PP!\n");
2387                 goto out_deinit_dma;
2388         }
2389
2390         memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2391
2392         mrvl_lcore_first = RTE_MAX_LCORE;
2393         mrvl_lcore_last = 0;
2394
2395 init_devices:
2396         for (i = 0; i < ifnum; i++) {
2397                 RTE_LOG(INFO, PMD, "Creating %s\n", ifnames.names[i]);
2398                 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2399                 if (ret)
2400                         goto out_cleanup;
2401         }
2402         mrvl_dev_num += ifnum;
2403
2404         rte_kvargs_free(kvlist);
2405
2406         return 0;
2407 out_cleanup:
     /* tear down only the devices that were actually created */
2408         for (; i > 0; i--)
2409                 mrvl_eth_dev_destroy(ifnames.names[i - 1]);
2410
2411         if (mrvl_dev_num == 0)
2412                 mrvl_deinit_pp2();
2413 out_deinit_dma:
2414         if (mrvl_dev_num == 0)
2415                 mv_sys_dma_mem_destroy();
2416 out_free_kvlist:
2417         rte_kvargs_free(kvlist);
2418
2419         return ret;
2420 }
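
     /*
      * Illustrative invocation sketch (not part of this PMD): the probe above
      * is driven by a --vdev argument combining the "iface" and the optional
      * "cfg" keys; the interface names and config path below are assumptions
      * made for the example.
      *
      *     testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/tmp/qos.conf \
      *             -- -i
      */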
2421
2422 /**
2423  * DPDK callback to remove virtual device.
2424  *
2425  * @param vdev
2426  *   Pointer to the removed virtual device.
2427  *
2428  * @return
2429  *   0 on success, negative error value otherwise.
2430  */
2431 static int
2432 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2433 {
2434         int i;
2435         const char *name;
2436
2437         name = rte_vdev_device_name(vdev);
2438         if (!name)
2439                 return -EINVAL;
2440
2441         RTE_LOG(INFO, PMD, "Removing %s\n", name);
2442
2443         for (i = 0; i < rte_eth_dev_count(); i++) {
2444                 char ifname[RTE_ETH_NAME_MAX_LEN];
2445
2446                 rte_eth_dev_get_name_by_port(i, ifname);
2447                 mrvl_eth_dev_destroy(ifname);
2448                 mrvl_dev_num--;
2449         }
2450
2451         if (mrvl_dev_num == 0) {
2452                 RTE_LOG(INFO, PMD, "Perform MUSDK deinit\n");
2453                 mrvl_deinit_hifs();
2454                 mrvl_deinit_pp2();
2455                 mv_sys_dma_mem_destroy();
2456         }
2457
2458         return 0;
2459 }
2460
2461 static struct rte_vdev_driver pmd_mrvl_drv = {
2462         .probe = rte_pmd_mrvl_probe,
2463         .remove = rte_pmd_mrvl_remove,
2464 };
2465
2466 RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
2467 RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);