1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017-2021 Marvell International Ltd.
3  * Copyright(c) 2017-2021 Semihalf.
4  * All rights reserved.
5  */
6
7 #include <rte_string_fns.h>
8 #include <ethdev_driver.h>
9 #include <rte_kvargs.h>
10 #include <rte_log.h>
11 #include <rte_malloc.h>
12 #include <rte_bus_vdev.h>
13
14 #include <fcntl.h>
15 #include <linux/ethtool.h>
16 #include <linux/sockios.h>
17 #include <net/if.h>
18 #include <net/if_arp.h>
19 #include <sys/ioctl.h>
20 #include <sys/socket.h>
21 #include <sys/stat.h>
22 #include <sys/types.h>
23
24 #include <rte_mvep_common.h>
25 #include "mrvl_ethdev.h"
26 #include "mrvl_qos.h"
27 #include "mrvl_flow.h"
28 #include "mrvl_mtr.h"
29 #include "mrvl_tm.h"
30
31 /* bitmask with reserved hifs */
32 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
33 /* bitmask with reserved bpools */
34 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
35 /* bitmask with reserved kernel RSS tables */
36 #define MRVL_MUSDK_RSS_RESERVED 0x0F
37 /* maximum number of available hifs */
38 #define MRVL_MUSDK_HIFS_MAX 9
39
40 /* prefetch shift */
41 #define MRVL_MUSDK_PREFETCH_SHIFT 2
42
43 /* 25 TCAM entries are reserved for uc/mc filter entries,
44  * plus 1 for the primary MAC address
45  */
46 #define MRVL_MAC_ADDRS_MAX (1 + 25)
47 #define MRVL_MATCH_LEN 16
48 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
49 /* Maximum allowable packet size */
50 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
51
52 #define MRVL_IFACE_NAME_ARG "iface"
53 #define MRVL_CFG_ARG "cfg"
54
55 #define MRVL_BURST_SIZE 64
56
57 #define MRVL_ARP_LENGTH 28
58
59 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
60 #define MRVL_COOKIE_HIGH_ADDR_MASK 0xffffff0000000000
61
62 /** Port Rx offload capabilities */
63 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
64                           DEV_RX_OFFLOAD_JUMBO_FRAME | \
65                           DEV_RX_OFFLOAD_CHECKSUM)
66
67 /** Port Tx offload capabilities */
68 #define MRVL_TX_OFFLOAD_CHECKSUM (DEV_TX_OFFLOAD_IPV4_CKSUM | \
69                                   DEV_TX_OFFLOAD_UDP_CKSUM  | \
70                                   DEV_TX_OFFLOAD_TCP_CKSUM)
71 #define MRVL_TX_OFFLOADS (MRVL_TX_OFFLOAD_CHECKSUM | \
72                           DEV_TX_OFFLOAD_MULTI_SEGS)
73
74 #define MRVL_TX_PKT_OFFLOADS (PKT_TX_IP_CKSUM | \
75                               PKT_TX_TCP_CKSUM | \
76                               PKT_TX_UDP_CKSUM)
77
78 static const char * const valid_args[] = {
79         MRVL_IFACE_NAME_ARG,
80         MRVL_CFG_ARG,
81         NULL
82 };
83
84 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
85 static struct pp2_hif *hifs[RTE_MAX_LCORE];
86 static int used_bpools[PP2_NUM_PKT_PROC] = {
87         [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
88 };
89
90 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
91 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
92 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
93 static int dummy_pool_id[PP2_NUM_PKT_PROC];
94 struct pp2_bpool *dummy_pool[PP2_NUM_PKT_PROC] = {0};
95
96 struct mrvl_ifnames {
97         const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
98         int idx;
99 };
100
101 /*
102  * To support buffer harvesting based on the loopback port, a shadow
103  * queue structure was introduced for buffer information bookkeeping.
104  *
105  * Before a packet is sent, the related buffer information (pp2_buff_inf)
106  * is stored in the shadow queue. After the packet is transmitted, the
107  * no longer used packet buffer is released back to its original
108  * hardware pool, provided it originated from an interface.
109  * In case it was generated by the application itself, i.e. the
110  * mbuf->port field is 0xff, it is released to the software mempool.
111  */
112 struct mrvl_shadow_txq {
113         int head;           /* write index - used when sending buffers */
114         int tail;           /* read index - used when releasing buffers */
115         u16 size;           /* queue occupied size */
116         u16 num_to_release; /* number of descriptors sent, that can be
117                              * released
118                              */
119         struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
120 };
121
122 struct mrvl_rxq {
123         struct mrvl_priv *priv;
124         struct rte_mempool *mp;
125         int queue_id;
126         int port_id;
127         int cksum_enabled;
128         uint64_t bytes_recv;
129         uint64_t drop_mac;
130 };
131
132 struct mrvl_txq {
133         struct mrvl_priv *priv;
134         int queue_id;
135         int port_id;
136         uint64_t bytes_sent;
137         struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
138         int tx_deferred_start;
139 };
140
141 static int mrvl_lcore_first;
142 static int mrvl_lcore_last;
143 static int mrvl_dev_num;
144
145 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
146 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
147                         struct pp2_hif *hif, unsigned int core_id,
148                         struct mrvl_shadow_txq *sq, int qid, int force);
149
150 static uint16_t mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
151                                   uint16_t nb_pkts);
152 static uint16_t mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
153                                      uint16_t nb_pkts);
154 static int rte_pmd_mrvl_remove(struct rte_vdev_device *vdev);
155 static void mrvl_deinit_pp2(void);
156 static void mrvl_deinit_hifs(void);
157
158 static int
159 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
160                   uint32_t index, uint32_t vmdq __rte_unused);
161 static int
162 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr);
163 static int
164 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
165 static int mrvl_promiscuous_enable(struct rte_eth_dev *dev);
166 static int mrvl_allmulticast_enable(struct rte_eth_dev *dev);
167
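/** Generate an xstats table entry: name, offset and size of a field in struct pp2_ppio_statistics. */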
168 #define MRVL_XSTATS_TBL_ENTRY(name) { \
169         #name, offsetof(struct pp2_ppio_statistics, name),      \
170         sizeof(((struct pp2_ppio_statistics *)0)->name)         \
171 }
172
173 /* Table with xstats data */
174 static struct {
175         const char *name;
176         unsigned int offset;
177         unsigned int size;
178 } mrvl_xstats_tbl[] = {
179         MRVL_XSTATS_TBL_ENTRY(rx_bytes),
180         MRVL_XSTATS_TBL_ENTRY(rx_packets),
181         MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
182         MRVL_XSTATS_TBL_ENTRY(rx_errors),
183         MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
184         MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
185         MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
186         MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
187         MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
188         MRVL_XSTATS_TBL_ENTRY(tx_bytes),
189         MRVL_XSTATS_TBL_ENTRY(tx_packets),
190         MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
191         MRVL_XSTATS_TBL_ENTRY(tx_errors)
192 };
193
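/**
 * Reserve a bit in a resource bitmap.
 *
 * Picks the bit directly above the highest bit currently set, so
 * consecutive calls reserve consecutive resources.
 *
 * @param bitmap
 *   Pointer to the bitmap to search and update.
 * @param max
 *   Number of usable bits in the bitmap.
 *
 * @return
 *   Index of the reserved bit on success, -1 if none is available.
 */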
194 static inline int
195 mrvl_reserve_bit(int *bitmap, int max)
196 {
197         int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
198
199         if (n >= max)
200                 return -1;
201
202         *bitmap |= 1 << n;
203
204         return n;
205 }
206
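/**
 * Reserve and initialize a dummy short bpool on each packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */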
207 static int
208 mrvl_pp2_fixup_init(void)
209 {
210         struct pp2_bpool_params bpool_params;
211         char                    name[15];
212         int                     err, i;
213
214         memset(dummy_pool, 0, sizeof(dummy_pool));
215         for (i = 0; i < pp2_get_num_inst(); i++) {
216                 dummy_pool_id[i] = mrvl_reserve_bit(&used_bpools[i],
217                                              PP2_BPOOL_NUM_POOLS);
218                 if (dummy_pool_id[i] < 0) {
219                         MRVL_LOG(ERR, "Can't find free pool\n");
220                         return -1;
221                 }
222
223                 memset(name, 0, sizeof(name));
224                 snprintf(name, sizeof(name), "pool-%d:%d", i, dummy_pool_id[i]);
225                 memset(&bpool_params, 0, sizeof(bpool_params));
226                 bpool_params.match = name;
227                 bpool_params.buff_len = MRVL_PKT_OFFS;
228                 bpool_params.dummy_short_pool = 1;
229                 err = pp2_bpool_init(&bpool_params, &dummy_pool[i]);
230                 if (err != 0 || !dummy_pool[i]) {
231                         MRVL_LOG(ERR, "BPool init failed!\n");
232                         used_bpools[i] &= ~(1 << dummy_pool_id[i]);
233                         return -1;
234                 }
235         }
236
237         return 0;
238 }
239
240 /**
241  * Initialize packet processor.
242  *
243  * @return
244  *   0 on success, negative error value otherwise.
245  */
246 static int
247 mrvl_init_pp2(void)
248 {
249         struct pp2_init_params  init_params;
250         int                     err;
251
252         memset(&init_params, 0, sizeof(init_params));
253         init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
254         init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
255         init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
256         if (mrvl_cfg && mrvl_cfg->pp2_cfg.prs_udfs.num_udfs)
257                 memcpy(&init_params.prs_udfs, &mrvl_cfg->pp2_cfg.prs_udfs,
258                        sizeof(struct pp2_parse_udfs));
259         err = pp2_init(&init_params);
260         if (err != 0) {
261                 MRVL_LOG(ERR, "PP2 init failed");
262                 return -1;
263         }
264
265         err = mrvl_pp2_fixup_init();
266         if (err != 0) {
267                 MRVL_LOG(ERR, "PP2 fixup init failed");
268                 return -1;
269         }
270
271         return 0;
272 }
273
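/**
 * Release the dummy bpools reserved by mrvl_pp2_fixup_init().
 */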
274 static void
275 mrvl_pp2_fixup_deinit(void)
276 {
277         int i;
278
279         for (i = 0; i < PP2_NUM_PKT_PROC; i++) {
280                 if (!dummy_pool[i])
281                         continue;
282                 pp2_bpool_deinit(dummy_pool[i]);
283                 used_bpools[i] &= ~(1 << dummy_pool_id[i]);
284         }
285 }
286
287 /**
288  * Deinitialize packet processor.
289  *
290  * Releases the dummy bpools created during initialization and
291  * deinitializes the MUSDK pp2 library.
292  */
293 static void
294 mrvl_deinit_pp2(void)
295 {
296         mrvl_pp2_fixup_deinit();
297         pp2_deinit();
298 }
299
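/**
 * Store buffer information of a sent mbuf in the tx shadow queue.
 *
 * The bpool entry is set to NULL for mbufs that cannot be returned to
 * a hardware pool (no buffer, unknown port or refcnt > 1); those are
 * freed to the software mempool instead.
 *
 * @param sq
 *   Pointer to the shadow queue.
 * @param buf
 *   Pointer to the mbuf being sent (may be NULL).
 */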
300 static inline void
301 mrvl_fill_shadowq(struct mrvl_shadow_txq *sq, struct rte_mbuf *buf)
302 {
303         sq->ent[sq->head].buff.cookie = (uint64_t)buf;
304         sq->ent[sq->head].buff.addr = buf ?
305                 rte_mbuf_data_iova_default(buf) : 0;
306
307         sq->ent[sq->head].bpool =
308                 (unlikely(!buf || buf->port >= RTE_MAX_ETHPORTS ||
309                  buf->refcnt > 1)) ? NULL :
310                  mrvl_port_to_bpool_lookup[buf->port];
311
312         sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
313         sq->size++;
314 }
315
316 /**
317  * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
318  */
319 static void
320 mrvl_deinit_hifs(void)
321 {
322         int i;
323
324         for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
325                 if (hifs[i])
326                         pp2_hif_deinit(hifs[i]);
327         }
328         used_hifs = MRVL_MUSDK_HIFS_RESERVED;
329         memset(hifs, 0, sizeof(hifs));
330 }
331
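/**
 * Fill an outq descriptor with mbuf physical address, offset and length.
 *
 * @param desc
 *   Pointer to the outq descriptor.
 * @param buf
 *   Pointer to the mbuf to be transmitted.
 */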
332 static inline void
333 mrvl_fill_desc(struct pp2_ppio_desc *desc, struct rte_mbuf *buf)
334 {
335         pp2_ppio_outq_desc_reset(desc);
336         pp2_ppio_outq_desc_set_phys_addr(desc, rte_pktmbuf_iova(buf));
337         pp2_ppio_outq_desc_set_pkt_offset(desc, 0);
338         pp2_ppio_outq_desc_set_pkt_len(desc, rte_pktmbuf_data_len(buf));
339 }
340
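/**
 * Sum the per-lcore buffer counters of a given bpool.
 *
 * @param pp2_id
 *   Packet processor id.
 * @param pool_id
 *   Bpool id.
 *
 * @return
 *   Total number of buffers accounted to the bpool.
 */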
341 static inline int
342 mrvl_get_bpool_size(int pp2_id, int pool_id)
343 {
344         int i;
345         int size = 0;
346
347         for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
348                 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
349
350         return size;
351 }
352
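/**
 * Initialize a MUSDK hardware interface (hif) for the given core.
 *
 * @param core_id
 *   Core id the hif is created for.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */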
353 static int
354 mrvl_init_hif(int core_id)
355 {
356         struct pp2_hif_params params;
357         char match[MRVL_MATCH_LEN];
358         int ret;
359
360         ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
361         if (ret < 0) {
362                 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
363                 return ret;
364         }
365
366         snprintf(match, sizeof(match), "hif-%d", ret);
367         memset(&params, 0, sizeof(params));
368         params.match = match;
369         params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
370         ret = pp2_hif_init(&params, &hifs[core_id]);
371         if (ret) {
372                 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
373                 return ret;
374         }
375
376         return 0;
377 }
378
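/**
 * Get the hif associated with an lcore, initializing it on first use.
 *
 * @param priv
 *   Pointer to the port's private structure (provides the lock).
 * @param core_id
 *   Lcore id.
 *
 * @return
 *   Pointer to the hif or NULL on failure.
 */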
379 static inline struct pp2_hif*
380 mrvl_get_hif(struct mrvl_priv *priv, int core_id)
381 {
382         int ret;
383
384         if (likely(hifs[core_id] != NULL))
385                 return hifs[core_id];
386
387         rte_spinlock_lock(&priv->lock);
388
389         ret = mrvl_init_hif(core_id);
390         if (ret < 0) {
391                 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
392                 goto out;
393         }
394
395         if (core_id < mrvl_lcore_first)
396                 mrvl_lcore_first = core_id;
397
398         if (core_id > mrvl_lcore_last)
399                 mrvl_lcore_last = core_id;
400 out:
401         rte_spinlock_unlock(&priv->lock);
402
403         return hifs[core_id];
404 }
405
406 /**
407  * Set tx burst function according to offload flag
408  *
409  * @param dev
410  *   Pointer to Ethernet device structure.
411  */
412 static void
413 mrvl_set_tx_function(struct rte_eth_dev *dev)
414 {
415         struct mrvl_priv *priv = dev->data->dev_private;
416
417         /* Use a simple Tx queue (no offloads, no multi segs) if possible */
418         if (priv->multiseg) {
419                 RTE_LOG(INFO, PMD, "Using multi-segment tx callback\n");
420                 dev->tx_pkt_burst = mrvl_tx_sg_pkt_burst;
421         } else {
422                 RTE_LOG(INFO, PMD, "Using single-segment tx callback\n");
423                 dev->tx_pkt_burst = mrvl_tx_pkt_burst;
424         }
425 }
426
427 /**
428  * Configure RSS based on the DPDK RSS configuration.
429  *
430  * @param priv
431  *   Pointer to private structure.
432  * @param rss_conf
433  *   Pointer to RSS configuration.
434  *
435  * @return
436  *   0 on success, negative error value otherwise.
437  */
438 static int
439 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
440 {
441         if (rss_conf->rss_key)
442                 MRVL_LOG(WARNING, "Changing hash key is not supported");
443
444         if (rss_conf->rss_hf == 0) {
445                 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
446         } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
447                 priv->ppio_params.inqs_params.hash_type =
448                         PP2_PPIO_HASH_T_2_TUPLE;
449         } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
450                 priv->ppio_params.inqs_params.hash_type =
451                         PP2_PPIO_HASH_T_5_TUPLE;
452                 priv->rss_hf_tcp = 1;
453         } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
454                 priv->ppio_params.inqs_params.hash_type =
455                         PP2_PPIO_HASH_T_5_TUPLE;
456                 priv->rss_hf_tcp = 0;
457         } else {
458                 return -EINVAL;
459         }
460
461         return 0;
462 }
463
464 /**
465  * Ethernet device configuration.
466  *
467  * Prepare the driver for a given number of TX and RX queues and
468  * configure RSS.
469  *
470  * @param dev
471  *   Pointer to Ethernet device structure.
472  *
473  * @return
474  *   0 on success, negative error value otherwise.
475  */
476 static int
477 mrvl_dev_configure(struct rte_eth_dev *dev)
478 {
479         struct mrvl_priv *priv = dev->data->dev_private;
480         int ret;
481
482         if (priv->ppio) {
483                 MRVL_LOG(INFO, "Device reconfiguration is not supported");
484                 return -EINVAL;
485         }
486
487         if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
488             dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
489                 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
490                         dev->data->dev_conf.rxmode.mq_mode);
491                 return -EINVAL;
492         }
493
494         if (dev->data->dev_conf.rxmode.split_hdr_size) {
495                 MRVL_LOG(INFO, "Split headers not supported");
496                 return -EINVAL;
497         }
498
499         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
500                 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
501                                  MRVL_PP2_ETH_HDRS_LEN;
502                 if (dev->data->mtu > priv->max_mtu) {
503                 MRVL_LOG(ERR, "MTU %u (derived from max_rx_pkt_len %u) is larger than max_mtu %u\n",
504                                  dev->data->mtu,
505                                  dev->data->dev_conf.rxmode.max_rx_pkt_len,
506                                  priv->max_mtu);
507                         return -EINVAL;
508                 }
509         }
510
511         if (dev->data->dev_conf.txmode.offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
512                 priv->multiseg = 1;
513
514         ret = mrvl_configure_rxqs(priv, dev->data->port_id,
515                                   dev->data->nb_rx_queues);
516         if (ret < 0)
517                 return ret;
518
519         ret = mrvl_configure_txqs(priv, dev->data->port_id,
520                                   dev->data->nb_tx_queues);
521         if (ret < 0)
522                 return ret;
523
524         priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
525         priv->ppio_params.maintain_stats = 1;
526         priv->nb_rx_queues = dev->data->nb_rx_queues;
527
528         ret = mrvl_tm_init(dev);
529         if (ret < 0)
530                 return ret;
531
532         if (dev->data->nb_rx_queues == 1 &&
533             dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
534                 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
535                 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
536
537                 return 0;
538         }
539
540         return mrvl_configure_rss(priv,
541                         &dev->data->dev_conf.rx_adv_conf.rss_conf);
542 }
543
544 /**
545  * DPDK callback to change the MTU.
546  *
547  * Setting the MTU affects hardware MRU (packets larger than the MRU
548  * will be dropped).
549  *
550  * @param dev
551  *   Pointer to Ethernet device structure.
552  * @param mtu
553  *   New MTU.
554  *
555  * @return
556  *   0 on success, negative error value otherwise.
557  */
558 static int
559 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
560 {
561         struct mrvl_priv *priv = dev->data->dev_private;
562         uint16_t mru;
563         uint16_t mbuf_data_size = 0; /* SW buffer size */
564         int ret;
565
566         mru = MRVL_PP2_MTU_TO_MRU(mtu);
567         /*
568          * min_rx_buf_size is equal to the mbuf data size
569          * unless the PMD has set it differently
570          */
571         mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
572         /* Prevent the PMD from:
573          * - setting an mru greater than the mbuf size, resulting in
574          * a hw and sw buffer size mismatch
575          * - setting an mtu that requires support for scattered packets
576          * when this feature has not been enabled/supported so far
577          * (TODO check scattered_rx flag here once scattered RX is supported).
578          */
579         if (mru - RTE_ETHER_CRC_LEN + MRVL_PKT_OFFS > mbuf_data_size) {
580                 mru = mbuf_data_size + RTE_ETHER_CRC_LEN - MRVL_PKT_OFFS;
581                 mtu = MRVL_PP2_MRU_TO_MTU(mru);
582                 MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
583                         "by current mbuf size: %u. Set MTU to %u, MRU to %u",
584                         mbuf_data_size, mtu, mru);
585         }
586
587         if (mtu < RTE_ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
588                 MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
589                 return -EINVAL;
590         }
591
592         dev->data->mtu = mtu;
593         dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
594
595         if (!priv->ppio)
596                 return 0;
597
598         ret = pp2_ppio_set_mru(priv->ppio, mru);
599         if (ret) {
600                 MRVL_LOG(ERR, "Failed to change MRU");
601                 return ret;
602         }
603
604         ret = pp2_ppio_set_mtu(priv->ppio, mtu);
605         if (ret) {
606                 MRVL_LOG(ERR, "Failed to change MTU");
607                 return ret;
608         }
609
610         return 0;
611 }
612
613 /**
614  * DPDK callback to bring the link up.
615  *
616  * @param dev
617  *   Pointer to Ethernet device structure.
618  *
619  * @return
620  *   0 on success, negative error value otherwise.
621  */
622 static int
623 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
624 {
625         struct mrvl_priv *priv = dev->data->dev_private;
626         int ret;
627
628         if (!priv->ppio) {
629                 dev->data->dev_link.link_status = ETH_LINK_UP;
630                 return 0;
631         }
632
633         ret = pp2_ppio_enable(priv->ppio);
634         if (ret)
635                 return ret;
636
637         /*
638          * MTU/MRU can be updated only after pp2_ppio_enable() has been
639          * called at least once, as pp2_ppio_enable() changes port->t_mode
640          * from the default 0 to PP2_TRAFFIC_INGRESS_EGRESS.
641          *
642          * Set the MTU to the default DPDK value here.
643          */
644         ret = mrvl_mtu_set(dev, dev->data->mtu);
645         if (ret) {
646                 pp2_ppio_disable(priv->ppio);
647                 return ret;
648         }
649
650         dev->data->dev_link.link_status = ETH_LINK_UP;
651         return 0;
652 }
653
654 /**
655  * DPDK callback to bring the link down.
656  *
657  * @param dev
658  *   Pointer to Ethernet device structure.
659  *
660  * @return
661  *   0 on success, negative error value otherwise.
662  */
663 static int
664 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
665 {
666         struct mrvl_priv *priv = dev->data->dev_private;
667         int ret;
668
669         if (!priv->ppio) {
670                 dev->data->dev_link.link_status = ETH_LINK_DOWN;
671                 return 0;
672         }
673         ret = pp2_ppio_disable(priv->ppio);
674         if (ret)
675                 return ret;
676
677         dev->data->dev_link.link_status = ETH_LINK_DOWN;
678         return 0;
679 }
680
681 /**
682  * DPDK callback to start tx queue.
683  *
684  * @param dev
685  *   Pointer to Ethernet device structure.
686  * @param queue_id
687  *   Transmit queue index.
688  *
689  * @return
690  *   0 on success, negative error value otherwise.
691  */
692 static int
693 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
694 {
695         struct mrvl_priv *priv = dev->data->dev_private;
696         int ret;
697
698         if (!priv)
699                 return -EPERM;
700
701         /* passing 1 enables given tx queue */
702         ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
703         if (ret) {
704                 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
705                 return ret;
706         }
707
708         dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
709
710         return 0;
711 }
712
713 /**
714  * DPDK callback to stop tx queue.
715  *
716  * @param dev
717  *   Pointer to Ethernet device structure.
718  * @param queue_id
719  *   Transmit queue index.
720  *
721  * @return
722  *   0 on success, negative error value otherwise.
723  */
724 static int
725 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
726 {
727         struct mrvl_priv *priv = dev->data->dev_private;
728         int ret;
729
730         if (!priv->ppio)
731                 return -EPERM;
732
733         /* passing 0 disables given tx queue */
734         ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
735         if (ret) {
736                 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
737                 return ret;
738         }
739
740         dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
741
742         return 0;
743 }
744
745 /**
746  * Populate VLAN Filter configuration.
747  *
748  * @param dev
749  *   Pointer to Ethernet device structure.
750  * @param on
751  *   Toggle filter.
752  *
753  * @return
754  *   0 on success, negative error value otherwise.
755  */
756 static int mrvl_populate_vlan_table(struct rte_eth_dev *dev, int on)
757 {
758         uint32_t j;
759         int ret;
760         struct rte_vlan_filter_conf *vfc;
761
762         vfc = &dev->data->vlan_filter_conf;
763         for (j = 0; j < RTE_DIM(vfc->ids); j++) {
764                 uint64_t vlan;
765                 uint64_t vbit;
766                 uint64_t ids = vfc->ids[j];
767
768                 if (ids == 0)
769                         continue;
770
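                /*
                 * Each ids word tracks 64 VLAN ids; derive each VLAN id
                 * from the position of its set bit in the word.
                 */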
771                 while (ids) {
772                         vlan = 64 * j;
773                         /* count trailing zeroes */
774                         vbit = ~ids & (ids - 1);
775                         /* clear least significant bit set */
776                         ids ^= (ids ^ (ids - 1)) ^ vbit;
777                         for (; vbit; vlan++)
778                                 vbit >>= 1;
779                         ret = mrvl_vlan_filter_set(dev, vlan, on);
780                         if (ret) {
781                                 MRVL_LOG(ERR, "Failed to setup VLAN filter\n");
782                                 return ret;
783                         }
784                 }
785         }
786
787         return 0;
788 }
789
790 /**
791  * DPDK callback to start the device.
792  *
793  * @param dev
794  *   Pointer to Ethernet device structure.
795  *
796  * @return
797  *   0 on success, negative errno value on failure.
798  */
799 static int
800 mrvl_dev_start(struct rte_eth_dev *dev)
801 {
802         struct mrvl_priv *priv = dev->data->dev_private;
803         char match[MRVL_MATCH_LEN];
804         int ret = 0, i, def_init_size;
805         struct rte_ether_addr *mac_addr;
806
807         if (priv->ppio)
808                 return mrvl_dev_set_link_up(dev);
809
810         snprintf(match, sizeof(match), "ppio-%d:%d",
811                  priv->pp_id, priv->ppio_id);
812         priv->ppio_params.match = match;
813         priv->ppio_params.eth_start_hdr = PP2_PPIO_HDR_ETH;
814         if (mrvl_cfg)
815                 priv->ppio_params.eth_start_hdr =
816                         mrvl_cfg->port[dev->data->port_id].eth_start_hdr;
817
818         /*
819          * Calculate the minimum bpool size for the refill feature as
820          * follows: 2 default burst sizes multiplied by the number of rx
821          * queues. If the bpool size drops below this value, new buffers
822          * will be added to the pool.
823          */
824         priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
825
826         /* In case the initial bpool size configured during queue setup is
827          * smaller than the minimum size, add more buffers
828          */
829         def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
830         if (priv->bpool_init_size < def_init_size) {
831                 int buffs_to_add = def_init_size - priv->bpool_init_size;
832
833                 priv->bpool_init_size += buffs_to_add;
834                 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
835                 if (ret)
836                         MRVL_LOG(ERR, "Failed to add buffers to bpool");
837         }
838
839         /*
840          * Calculate the maximum bpool size for the refill feature as
841          * follows: the maximum number of descriptors in the rx queue
842          * multiplied by the number of rx queues, plus the minimum bpool
843          * size. In case the bpool size exceeds this value, superfluous
844          * buffers will be removed
845          */
846         priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
847                                 priv->bpool_min_size;
848
849         ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
850         if (ret) {
851                 MRVL_LOG(ERR, "Failed to init ppio");
852                 return ret;
853         }
854
855         /*
856          * In case there are some stale uc/mc mac addresses, flush them
857          * here. It cannot be done during mrvl_dev_close() as port information
858          * is already gone at that point (due to pp2_ppio_deinit() in
859          * mrvl_dev_stop()).
860          */
861         if (!priv->uc_mc_flushed) {
862                 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
863                 if (ret) {
864                         MRVL_LOG(ERR,
865                                 "Failed to flush uc/mc filter list");
866                         goto out;
867                 }
868                 priv->uc_mc_flushed = 1;
869         }
870
871         ret = mrvl_mtu_set(dev, dev->data->mtu);
872         if (ret)
873                 MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);
874
875         if (!rte_is_zero_ether_addr(&dev->data->mac_addrs[0]))
876                 mrvl_mac_addr_set(dev, &dev->data->mac_addrs[0]);
877
878         for (i = 1; i < MRVL_MAC_ADDRS_MAX; i++) {
879                 mac_addr = &dev->data->mac_addrs[i];
880
881                 /* skip zero address */
882                 if (rte_is_zero_ether_addr(mac_addr))
883                         continue;
884
885                 mrvl_mac_addr_add(dev, mac_addr, i, 0);
886         }
887
888         if (dev->data->all_multicast == 1)
889                 mrvl_allmulticast_enable(dev);
890
891         if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_VLAN_FILTER) {
892                 ret = mrvl_populate_vlan_table(dev, 1);
893                 if (ret) {
894                         MRVL_LOG(ERR, "Failed to populate VLAN table");
895                         goto out;
896                 }
897         }
898
899         /* For default QoS config, don't start classifier. */
900         if (mrvl_cfg  &&
901             mrvl_cfg->port[dev->data->port_id].use_global_defaults == 0) {
902                 ret = mrvl_start_qos_mapping(priv);
903                 if (ret) {
904                         MRVL_LOG(ERR, "Failed to setup QoS mapping");
905                         goto out;
906                 }
907         }
908
909         ret = pp2_ppio_set_loopback(priv->ppio, dev->data->dev_conf.lpbk_mode);
910         if (ret) {
911                 MRVL_LOG(ERR, "Failed to set loopback");
912                 goto out;
913         }
914
915         if (dev->data->promiscuous == 1)
916                 mrvl_promiscuous_enable(dev);
917
918         if (dev->data->dev_link.link_status == ETH_LINK_UP) {
919                 ret = mrvl_dev_set_link_up(dev);
920                 if (ret) {
921                         MRVL_LOG(ERR, "Failed to set link up");
922                         dev->data->dev_link.link_status = ETH_LINK_DOWN;
923                         goto out;
924                 }
925         }
926
927         /* start tx queues */
928         for (i = 0; i < dev->data->nb_tx_queues; i++) {
929                 struct mrvl_txq *txq = dev->data->tx_queues[i];
930
931                 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
932
933                 if (!txq->tx_deferred_start)
934                         continue;
935
936                 /*
937                  * All txqs are started by default. Stop them
938                  * so that tx_deferred_start works as expected.
939                  */
940                 ret = mrvl_tx_queue_stop(dev, i);
941                 if (ret)
942                         goto out;
943         }
944
945         mrvl_flow_init(dev);
946         mrvl_mtr_init(dev);
947         mrvl_set_tx_function(dev);
948
949         return 0;
950 out:
951         MRVL_LOG(ERR, "Failed to start device");
952         pp2_ppio_deinit(priv->ppio);
953         return ret;
954 }
955
956 /**
957  * Flush receive queues.
958  *
959  * @param dev
960  *   Pointer to Ethernet device structure.
961  */
962 static void
963 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
964 {
965         int i;
966
967         MRVL_LOG(INFO, "Flushing rx queues");
968         for (i = 0; i < dev->data->nb_rx_queues; i++) {
969                 int ret, num;
970
971                 do {
972                         struct mrvl_rxq *q = dev->data->rx_queues[i];
973                         struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
974
975                         num = MRVL_PP2_RXD_MAX;
976                         ret = pp2_ppio_recv(q->priv->ppio,
977                                             q->priv->rxq_map[q->queue_id].tc,
978                                             q->priv->rxq_map[q->queue_id].inq,
979                                             descs, (uint16_t *)&num);
980                 } while (ret == 0 && num);
981         }
982 }
983
984 /**
985  * Flush transmit shadow queues.
986  *
987  * @param dev
988  *   Pointer to Ethernet device structure.
989  */
990 static void
991 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
992 {
993         int i, j;
994         struct mrvl_txq *txq;
995
996         MRVL_LOG(INFO, "Flushing tx shadow queues");
997         for (i = 0; i < dev->data->nb_tx_queues; i++) {
998                 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
999
1000                 for (j = 0; j < RTE_MAX_LCORE; j++) {
1001                         struct mrvl_shadow_txq *sq;
1002
1003                         if (!hifs[j])
1004                                 continue;
1005
1006                         sq = &txq->shadow_txqs[j];
1007                         mrvl_free_sent_buffers(txq->priv->ppio,
1008                                 hifs[j], j, sq, txq->queue_id, 1);
1009                         while (sq->tail != sq->head) {
1010                                 uint64_t addr = cookie_addr_high |
1011                                         sq->ent[sq->tail].buff.cookie;
1012                                 rte_pktmbuf_free(
1013                                         (struct rte_mbuf *)addr);
1014                                 sq->tail = (sq->tail + 1) &
1015                                             MRVL_PP2_TX_SHADOWQ_MASK;
1016                         }
1017                         memset(sq, 0, sizeof(*sq));
1018                 }
1019         }
1020 }
1021
1022 /**
1023  * Flush hardware bpool (buffer-pool).
1024  *
1025  * @param dev
1026  *   Pointer to Ethernet device structure.
1027  */
1028 static void
1029 mrvl_flush_bpool(struct rte_eth_dev *dev)
1030 {
1031         struct mrvl_priv *priv = dev->data->dev_private;
1032         struct pp2_hif *hif;
1033         uint32_t num;
1034         int ret;
1035         unsigned int core_id = rte_lcore_id();
1036
1037         if (core_id == LCORE_ID_ANY)
1038                 core_id = rte_get_main_lcore();
1039
1040         hif = mrvl_get_hif(priv, core_id);
1041
1042         ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
1043         if (ret) {
1044                 MRVL_LOG(ERR, "Failed to get bpool buffers number");
1045                 return;
1046         }
1047
1048         while (num--) {
1049                 struct pp2_buff_inf inf;
1050                 uint64_t addr;
1051
1052                 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
1053                 if (ret)
1054                         break;
1055
1056                 addr = cookie_addr_high | inf.cookie;
1057                 rte_pktmbuf_free((struct rte_mbuf *)addr);
1058         }
1059 }
1060
1061 /**
1062  * DPDK callback to stop the device.
1063  *
1064  * @param dev
1065  *   Pointer to Ethernet device structure.
1066  */
1067 static int
1068 mrvl_dev_stop(struct rte_eth_dev *dev)
1069 {
1070         return mrvl_dev_set_link_down(dev);
1071 }
1072
1073 /**
1074  * DPDK callback to close the device.
1075  *
1076  * @param dev
1077  *   Pointer to Ethernet device structure.
1078  */
1079 static int
1080 mrvl_dev_close(struct rte_eth_dev *dev)
1081 {
1082         struct mrvl_priv *priv = dev->data->dev_private;
1083         size_t i;
1084
1085         if (rte_eal_process_type() != RTE_PROC_PRIMARY)
1086                 return 0;
1087
1088         mrvl_flush_rx_queues(dev);
1089         mrvl_flush_tx_shadow_queues(dev);
1090         mrvl_flow_deinit(dev);
1091         mrvl_mtr_deinit(dev);
1092
1093         for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
1094                 struct pp2_ppio_tc_params *tc_params =
1095                         &priv->ppio_params.inqs_params.tcs_params[i];
1096
1097                 if (tc_params->inqs_params) {
1098                         rte_free(tc_params->inqs_params);
1099                         tc_params->inqs_params = NULL;
1100                 }
1101         }
1102
1103         if (priv->cls_tbl) {
1104                 pp2_cls_tbl_deinit(priv->cls_tbl);
1105                 priv->cls_tbl = NULL;
1106         }
1107
1108         if (priv->qos_tbl) {
1109                 pp2_cls_qos_tbl_deinit(priv->qos_tbl);
1110                 priv->qos_tbl = NULL;
1111         }
1112
1113         mrvl_flush_bpool(dev);
1114         mrvl_tm_deinit(dev);
1115
1116         if (priv->ppio) {
1117                 pp2_ppio_deinit(priv->ppio);
1118                 priv->ppio = NULL;
1119         }
1120
1121         /* policer must be released after ppio deinitialization */
1122         if (priv->default_policer) {
1123                 pp2_cls_plcr_deinit(priv->default_policer);
1124                 priv->default_policer = NULL;
1125         }
1126
1127
1128         if (priv->bpool) {
1129                 pp2_bpool_deinit(priv->bpool);
1130                 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
1131                 priv->bpool = NULL;
1132         }
1133
1134         mrvl_dev_num--;
1135
1136         if (mrvl_dev_num == 0) {
1137                 MRVL_LOG(INFO, "Perform MUSDK deinit");
1138                 mrvl_deinit_hifs();
1139                 mrvl_deinit_pp2();
1140                 rte_mvep_deinit(MVEP_MOD_T_PP2);
1141         }
1142
1143         return 0;
1144 }
1145
1146 /**
1147  * DPDK callback to retrieve physical link information.
1148  *
1149  * @param dev
1150  *   Pointer to Ethernet device structure.
1151  * @param wait_to_complete
1152  *   Wait for request completion (ignored).
1153  *
1154  * @return
1155  *   0 on success, negative error value otherwise.
1156  */
1157 static int
1158 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
1159 {
1160         /*
1161          * TODO
1162          * once MUSDK provides necessary API use it here
1163          */
1164         struct mrvl_priv *priv = dev->data->dev_private;
1165         struct ethtool_cmd edata;
1166         struct ifreq req;
1167         int ret, fd, link_up;
1168
1169         if (!priv->ppio)
1170                 return -EPERM;
1171
1172         edata.cmd = ETHTOOL_GSET;
1173
1174         strcpy(req.ifr_name, dev->data->name);
1175         req.ifr_data = (void *)&edata;
1176
1177         fd = socket(AF_INET, SOCK_DGRAM, 0);
1178         if (fd == -1)
1179                 return -EFAULT;
1180
1181         ret = ioctl(fd, SIOCETHTOOL, &req);
1182         if (ret == -1) {
1183                 close(fd);
1184                 return -EFAULT;
1185         }
1186
1187         close(fd);
1188
1189         switch (ethtool_cmd_speed(&edata)) {
1190         case SPEED_10:
1191                 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
1192                 break;
1193         case SPEED_100:
1194                 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
1195                 break;
1196         case SPEED_1000:
1197                 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
1198                 break;
1199         case SPEED_10000:
1200                 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
1201                 break;
1202         default:
1203                 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
1204         }
1205
1206         dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
1207                                                          ETH_LINK_HALF_DUPLEX;
1208         dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
1209                                                            ETH_LINK_FIXED;
1210         pp2_ppio_get_link_state(priv->ppio, &link_up);
1211         dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
1212
1213         return 0;
1214 }
1215
1216 /**
1217  * DPDK callback to enable promiscuous mode.
1218  *
1219  * @param dev
1220  *   Pointer to Ethernet device structure.
1221  *
1222  * @return
1223  *   0 on success, negative error value otherwise.
1224  */
1225 static int
1226 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
1227 {
1228         struct mrvl_priv *priv = dev->data->dev_private;
1229         int ret;
1230
1231         if (priv->isolated)
1232                 return -ENOTSUP;
1233
1234         if (!priv->ppio)
1235                 return 0;
1236
1237         ret = pp2_ppio_set_promisc(priv->ppio, 1);
1238         if (ret) {
1239                 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
1240                 return -EAGAIN;
1241         }
1242
1243         return 0;
1244 }
1245
1246 /**
1247  * DPDK callback to enable allmulti mode.
1248  *
1249  * @param dev
1250  *   Pointer to Ethernet device structure.
1251  *
1252  * @return
1253  *   0 on success, negative error value otherwise.
1254  */
1255 static int
1256 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
1257 {
1258         struct mrvl_priv *priv = dev->data->dev_private;
1259         int ret;
1260
1261         if (priv->isolated)
1262                 return -ENOTSUP;
1263
1264         if (!priv->ppio)
1265                 return 0;
1266
1267         ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
1268         if (ret) {
1269                 MRVL_LOG(ERR, "Failed to enable all-multicast mode");
1270                 return -EAGAIN;
1271         }
1272
1273         return 0;
1274 }
1275
1276 /**
1277  * DPDK callback to disable promiscuous mode.
1278  *
1279  * @param dev
1280  *   Pointer to Ethernet device structure.
1281  *
1282  * @return
1283  *   0 on success, negative error value otherwise.
1284  */
1285 static int
1286 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
1287 {
1288         struct mrvl_priv *priv = dev->data->dev_private;
1289         int ret;
1290
1291         if (priv->isolated)
1292                 return -ENOTSUP;
1293
1294         if (!priv->ppio)
1295                 return 0;
1296
1297         ret = pp2_ppio_set_promisc(priv->ppio, 0);
1298         if (ret) {
1299                 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
1300                 return -EAGAIN;
1301         }
1302
1303         return 0;
1304 }
1305
1306 /**
1307  * DPDK callback to disable allmulticast mode.
1308  *
1309  * @param dev
1310  *   Pointer to Ethernet device structure.
1311  *
1312  * @return
1313  *   0 on success, negative error value otherwise.
1314  */
1315 static int
1316 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
1317 {
1318         struct mrvl_priv *priv = dev->data->dev_private;
1319         int ret;
1320
1321         if (priv->isolated)
1322                 return -ENOTSUP;
1323
1324         if (!priv->ppio)
1325                 return 0;
1326
1327         ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
1328         if (ret) {
1329                 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
1330                 return -EAGAIN;
1331         }
1332
1333         return 0;
1334 }
1335
1336 /**
1337  * DPDK callback to remove a MAC address.
1338  *
1339  * @param dev
1340  *   Pointer to Ethernet device structure.
1341  * @param index
1342  *   MAC address index.
1343  */
1344 static void
1345 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1346 {
1347         struct mrvl_priv *priv = dev->data->dev_private;
1348         char buf[RTE_ETHER_ADDR_FMT_SIZE];
1349         int ret;
1350
1351         if (priv->isolated)
1352                 return;
1353
1354         if (!priv->ppio)
1355                 return;
1356
1357         ret = pp2_ppio_remove_mac_addr(priv->ppio,
1358                                        dev->data->mac_addrs[index].addr_bytes);
1359         if (ret) {
1360                 rte_ether_format_addr(buf, sizeof(buf),
1361                                   &dev->data->mac_addrs[index]);
1362                 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
1363         }
1364 }
1365
1366 /**
1367  * DPDK callback to add a MAC address.
1368  *
1369  * @param dev
1370  *   Pointer to Ethernet device structure.
1371  * @param mac_addr
1372  *   MAC address to register.
1373  * @param index
1374  *   MAC address index.
1375  * @param vmdq
1376  *   VMDq pool index to associate address with (unused).
1377  *
1378  * @return
1379  *   0 on success, negative error value otherwise.
1380  */
1381 static int
1382 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr,
1383                   uint32_t index, uint32_t vmdq __rte_unused)
1384 {
1385         struct mrvl_priv *priv = dev->data->dev_private;
1386         char buf[RTE_ETHER_ADDR_FMT_SIZE];
1387         int ret;
1388
1389         if (priv->isolated)
1390                 return -ENOTSUP;
1391
1392         if (!priv->ppio)
1393                 return 0;
1394
1395         if (index == 0)
1396                 /* For setting index 0, mrvl_mac_addr_set() should be used. */
1397                 return -1;
1398
1399         /*
1400          * The maximum number of uc addresses can be tuned via the kernel
1401          * module mvpp2x parameter uc_filter_max. The maximum number of mc
1402          * addresses is then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently
1403          * they default to 4 and 21 respectively.
1404          *
1405          * If more than uc_filter_max uc addresses are added to the filter
1406          * list, the NIC switches to promiscuous mode automatically.
1407          *
1408          * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses
1409          * are added to the filter list, the NIC switches to all-multicast
1410          * mode automatically.
1411          */
1412         ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1413         if (ret) {
1414                 rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1415                 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1416                 return -1;
1417         }
1418
1419         return 0;
1420 }
1421
1422 /**
1423  * DPDK callback to set the primary MAC address.
1424  *
1425  * @param dev
1426  *   Pointer to Ethernet device structure.
1427  * @param mac_addr
1428  *   MAC address to register.
1429  *
1430  * @return
1431  *   0 on success, negative error value otherwise.
1432  */
1433 static int
1434 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1435 {
1436         struct mrvl_priv *priv = dev->data->dev_private;
1437         int ret;
1438
1439         if (priv->isolated)
1440                 return -ENOTSUP;
1441
1442         if (!priv->ppio)
1443                 return 0;
1444
1445         ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1446         if (ret) {
1447                 char buf[RTE_ETHER_ADDR_FMT_SIZE];
1448                 rte_ether_format_addr(buf, sizeof(buf), mac_addr);
1449                 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1450         }
1451
1452         return ret;
1453 }
1454
1455 /**
1456  * DPDK callback to get device statistics.
1457  *
1458  * @param dev
1459  *   Pointer to Ethernet device structure.
1460  * @param stats
1461  *   Stats structure output buffer.
1462  *
1463  * @return
1464  *   0 on success, negative error value otherwise.
1465  */
1466 static int
1467 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1468 {
1469         struct mrvl_priv *priv = dev->data->dev_private;
1470         struct pp2_ppio_statistics ppio_stats;
1471         uint64_t drop_mac = 0;
1472         unsigned int i, idx, ret;
1473
1474         if (!priv->ppio)
1475                 return -EPERM;
1476
1477         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1478                 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1479                 struct pp2_ppio_inq_statistics rx_stats;
1480
1481                 if (!rxq)
1482                         continue;
1483
1484                 idx = rxq->queue_id;
1485                 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1486                         MRVL_LOG(ERR,
1487                                 "rx queue %d stats out of range (0 - %d)",
1488                                 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1489                         continue;
1490                 }
1491
1492                 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1493                                                   priv->rxq_map[idx].tc,
1494                                                   priv->rxq_map[idx].inq,
1495                                                   &rx_stats, 0);
1496                 if (unlikely(ret)) {
1497                         MRVL_LOG(ERR,
1498                                 "Failed to update rx queue %d stats", idx);
1499                         break;
1500                 }
1501
1502                 stats->q_ibytes[idx] = rxq->bytes_recv;
1503                 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1504                 stats->q_errors[idx] = rx_stats.drop_early +
1505                                        rx_stats.drop_fullq +
1506                                        rx_stats.drop_bm +
1507                                        rxq->drop_mac;
1508                 stats->ibytes += rxq->bytes_recv;
1509                 drop_mac += rxq->drop_mac;
1510         }
1511
1512         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1513                 struct mrvl_txq *txq = dev->data->tx_queues[i];
1514                 struct pp2_ppio_outq_statistics tx_stats;
1515
1516                 if (!txq)
1517                         continue;
1518
1519                 idx = txq->queue_id;
1520                 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1521                         MRVL_LOG(ERR,
1522                                 "tx queue %d stats out of range (0 - %d)",
1523                                 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
                        /* skip to avoid writing past the per-queue stats arrays */
                        continue;
1524                 }
1525
1526                 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1527                                                    &tx_stats, 0);
1528                 if (unlikely(ret)) {
1529                         MRVL_LOG(ERR,
1530                                 "Failed to update tx queue %d stats", idx);
1531                         break;
1532                 }
1533
1534                 stats->q_opackets[idx] = tx_stats.deq_desc;
1535                 stats->q_obytes[idx] = txq->bytes_sent;
1536                 stats->obytes += txq->bytes_sent;
1537         }
1538
1539         ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1540         if (unlikely(ret)) {
1541                 MRVL_LOG(ERR, "Failed to update port statistics");
1542                 return ret;
1543         }
1544
1545         stats->ipackets += ppio_stats.rx_packets - drop_mac;
1546         stats->opackets += ppio_stats.tx_packets;
1547         stats->imissed += ppio_stats.rx_fullq_dropped +
1548                           ppio_stats.rx_bm_dropped +
1549                           ppio_stats.rx_early_dropped +
1550                           ppio_stats.rx_fifo_dropped +
1551                           ppio_stats.rx_cls_dropped;
1552         stats->ierrors = drop_mac;
1553
1554         return 0;
1555 }
1556
1557 /**
1558  * DPDK callback to clear device statistics.
1559  *
1560  * @param dev
1561  *   Pointer to Ethernet device structure.
1562  *
1563  * @return
1564  *   0 on success, negative error value otherwise.
1565  */
1566 static int
1567 mrvl_stats_reset(struct rte_eth_dev *dev)
1568 {
1569         struct mrvl_priv *priv = dev->data->dev_private;
1570         int i;
1571
1572         if (!priv->ppio)
1573                 return 0;
1574
1575         for (i = 0; i < dev->data->nb_rx_queues; i++) {
1576                 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1577
1578                 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1579                                             priv->rxq_map[i].inq, NULL, 1);
1580                 rxq->bytes_recv = 0;
1581                 rxq->drop_mac = 0;
1582         }
1583
1584         for (i = 0; i < dev->data->nb_tx_queues; i++) {
1585                 struct mrvl_txq *txq = dev->data->tx_queues[i];
1586
1587                 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1588                 txq->bytes_sent = 0;
1589         }
1590
1591         return pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1592 }
1593
1594 /**
1595  * DPDK callback to get extended statistics.
1596  *
1597  * @param dev
1598  *   Pointer to Ethernet device structure.
1599  * @param stats
1600  *   Pointer to xstats table.
1601  * @param n
1602  *   Number of entries in xstats table.
1603  * @return
1604  *   Negative value on error, number of xstats read otherwise.
1605  */
1606 static int
1607 mrvl_xstats_get(struct rte_eth_dev *dev,
1608                 struct rte_eth_xstat *stats, unsigned int n)
1609 {
1610         struct mrvl_priv *priv = dev->data->dev_private;
1611         struct pp2_ppio_statistics ppio_stats;
1612         unsigned int i;
1613
1614         if (!stats)
1615                 return 0;
1616
1617         pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1618         for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1619                 uint64_t val;
1620
1621                 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1622                         val = *(uint32_t *)((uint8_t *)&ppio_stats +
1623                                             mrvl_xstats_tbl[i].offset);
1624                 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1625                         val = *(uint64_t *)((uint8_t *)&ppio_stats +
1626                                             mrvl_xstats_tbl[i].offset);
1627                 else
1628                         return -EINVAL;
1629
1630                 stats[i].id = i;
1631                 stats[i].value = val;
1632         }
1633
1634         return n;
1635 }
1636
1637 /**
1638  * DPDK callback to reset extended statistics.
1639  *
1640  * @param dev
1641  *   Pointer to Ethernet device structure.
1642  *
1643  * @return
1644  *   0 on success, negative error value otherwise.
1645  */
1646 static int
1647 mrvl_xstats_reset(struct rte_eth_dev *dev)
1648 {
1649         return mrvl_stats_reset(dev);
1650 }
1651
1652 /**
1653  * DPDK callback to get extended statistics names.
1654  *
1655  * @param dev (unused)
1656  *   Pointer to Ethernet device structure.
1657  * @param xstats_names
1658  *   Pointer to xstats names table.
1659  * @param size
1660  *   Size of the xstats names table.
1661  * @return
1662  *   Number of names read.
1663  */
1664 static int
1665 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1666                       struct rte_eth_xstat_name *xstats_names,
1667                       unsigned int size)
1668 {
1669         unsigned int i;
1670
1671         if (!xstats_names)
1672                 return RTE_DIM(mrvl_xstats_tbl);
1673
1674         for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
1675                 strlcpy(xstats_names[i].name, mrvl_xstats_tbl[i].name,
1676                         RTE_ETH_XSTATS_NAME_SIZE);
1677
1678         return size;
1679 }
1680
1681 /**
1682  * DPDK callback to get information about the device.
1683  *
1684  * @param dev
1685  *   Pointer to Ethernet device structure.
1686  * @param info
1687  *   Info structure output buffer.
1688  */
1689 static int
1690 mrvl_dev_infos_get(struct rte_eth_dev *dev,
1691                    struct rte_eth_dev_info *info)
1692 {
1693         struct mrvl_priv *priv = dev->data->dev_private;
1694
1695         info->speed_capa = ETH_LINK_SPEED_10M |
1696                            ETH_LINK_SPEED_100M |
1697                            ETH_LINK_SPEED_1G |
1698                            ETH_LINK_SPEED_10G;
1699
1700         info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1701         info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1702         info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1703
1704         info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1705         info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1706         info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1707
1708         info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1709         info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1710         info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1711
1712         info->rx_offload_capa = MRVL_RX_OFFLOADS;
1713         info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1714
1715         info->tx_offload_capa = MRVL_TX_OFFLOADS;
1716         info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1717
1718         info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1719                                        ETH_RSS_NONFRAG_IPV4_TCP |
1720                                        ETH_RSS_NONFRAG_IPV4_UDP;
1721
1722         /* By default packets are dropped if no descriptors are available */
1723         info->default_rxconf.rx_drop_en = 1;
1724
1725         info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1726         info->max_mtu = priv->max_mtu;
1727
1728         return 0;
1729 }
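
/*
 * Illustrative sketch (not part of the driver): an application would query
 * these limits and capabilities through the generic API before configuring
 * the port; "port_id" is an assumed valid port.
 *
 *   struct rte_eth_dev_info dev_info;
 *   struct rte_eth_conf conf = {0};
 *
 *   rte_eth_dev_info_get(port_id, &dev_info);
 *   if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_CHECKSUM)
 *           conf.rxmode.offloads |= DEV_RX_OFFLOAD_CHECKSUM;
 *   rte_eth_dev_configure(port_id, 1, 1, &conf);
 */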
1730
1731 /**
1732  * Return supported packet types.
1733  *
1734  * @param dev
1735  *   Pointer to Ethernet device structure (unused).
1736  *
1737  * @return
1738  *   Const pointer to the table with supported packet types.
1739  */
1740 static const uint32_t *
1741 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1742 {
1743         static const uint32_t ptypes[] = {
1744                 RTE_PTYPE_L2_ETHER,
1745                 RTE_PTYPE_L2_ETHER_VLAN,
1746                 RTE_PTYPE_L2_ETHER_QINQ,
1747                 RTE_PTYPE_L3_IPV4,
1748                 RTE_PTYPE_L3_IPV4_EXT,
1749                 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1750                 RTE_PTYPE_L3_IPV6,
1751                 RTE_PTYPE_L3_IPV6_EXT,
1752                 RTE_PTYPE_L2_ETHER_ARP,
1753                 RTE_PTYPE_L4_TCP,
1754                 RTE_PTYPE_L4_UDP
1755         };
1756
1757         return ptypes;
1758 }
1759
1760 /**
1761  * DPDK callback to get information about specific receive queue.
1762  *
1763  * @param dev
1764  *   Pointer to Ethernet device structure.
1765  * @param rx_queue_id
1766  *   Receive queue index.
1767  * @param qinfo
1768  *   Receive queue information structure.
1769  */
1770 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1771                               struct rte_eth_rxq_info *qinfo)
1772 {
1773         struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1774         struct mrvl_priv *priv = dev->data->dev_private;
1775         int inq = priv->rxq_map[rx_queue_id].inq;
1776         int tc = priv->rxq_map[rx_queue_id].tc;
1777         struct pp2_ppio_tc_params *tc_params =
1778                 &priv->ppio_params.inqs_params.tcs_params[tc];
1779
1780         qinfo->mp = q->mp;
1781         qinfo->nb_desc = tc_params->inqs_params[inq].size;
1782 }
1783
1784 /**
1785  * DPDK callback to get information about specific transmit queue.
1786  *
1787  * @param dev
1788  *   Pointer to Ethernet device structure.
1789  * @param tx_queue_id
1790  *   Transmit queue index.
1791  * @param qinfo
1792  *   Transmit queue information structure.
1793  */
1794 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1795                               struct rte_eth_txq_info *qinfo)
1796 {
1797         struct mrvl_priv *priv = dev->data->dev_private;
1798         struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1799
1800         qinfo->nb_desc =
1801                 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1802         qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1803 }
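
/*
 * Illustrative sketch: both callbacks above back the generic queue
 * introspection API, e.g.:
 *
 *   struct rte_eth_rxq_info rxq_info;
 *
 *   if (rte_eth_rx_queue_info_get(port_id, 0, &rxq_info) == 0)
 *           printf("rxq0: %u descriptors, pool %s\n",
 *                  rxq_info.nb_desc, rxq_info.mp->name);
 */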
1804
1805 /**
1806  * DPDK callback to configure a VLAN filter.
1807  *
1808  * @param dev
1809  *   Pointer to Ethernet device structure.
1810  * @param vlan_id
1811  *   VLAN ID to filter.
1812  * @param on
1813  *   Toggle filter.
1814  *
1815  * @return
1816  *   0 on success, negative error value otherwise.
1817  */
1818 static int
1819 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1820 {
1821         struct mrvl_priv *priv = dev->data->dev_private;
1822
1823         if (priv->isolated)
1824                 return -ENOTSUP;
1825
1826         if (!priv->ppio)
1827                 return 0;
1828
1829         return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1830                     pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1831 }
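
/*
 * Illustrative sketch: from an application this callback is reached via
 * rte_eth_dev_vlan_filter(); DEV_RX_OFFLOAD_VLAN_FILTER must be enabled in
 * rxmode.offloads for the table to take effect (see mrvl_vlan_offload_set()
 * below). Accept VLAN 42, then remove the filter again:
 *
 *   rte_eth_dev_vlan_filter(port_id, 42, 1);
 *   rte_eth_dev_vlan_filter(port_id, 42, 0);
 */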
1832
1833 /**
1834  * DPDK callback to configure VLAN offload.
1835  *
1836  * @param dev
1837  *   Pointer to Ethernet device structure.
1838  * @param mask
1839  *   VLAN offload mask.
1840  *
1841  * @return
1842  *   0 on success, negative error value otherwise.
1843  */
1844 static int mrvl_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1845 {
1846         uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
1847         int ret;
1848
1849         if (mask & ETH_VLAN_STRIP_MASK)
1850                 MRVL_LOG(ERR, "VLAN stripping is not supported");
1851
1852         if (mask & ETH_VLAN_FILTER_MASK) {
1853                 if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
1854                         ret = mrvl_populate_vlan_table(dev, 1);
1855                 else
1856                         ret = mrvl_populate_vlan_table(dev, 0);
1857
1858                 if (ret)
1859                         return ret;
1860         }
1861
1862         if (mask & ETH_VLAN_EXTEND_MASK)
1863                 MRVL_LOG(ERR, "VLAN extend is not supported");
1864
1865         return 0;
1866 }
1867
1868 /**
1869  * Release buffers to the hardware bpool (buffer pool).
1870  *
1871  * @param rxq
1872  *   Receive queue pointer.
1873  * @param num
1874  *   Number of buffers to release to bpool.
1875  *
1876  * @return
1877  *   0 on success, negative error value otherwise.
1878  */
1879 static int
1880 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1881 {
1882         struct buff_release_entry entries[num];
1883         struct rte_mbuf *mbufs[num];
1884         int i, ret;
1885         unsigned int core_id;
1886         struct pp2_hif *hif;
1887         struct pp2_bpool *bpool;
1888
1889         core_id = rte_lcore_id();
1890         if (core_id == LCORE_ID_ANY)
1891                 core_id = rte_get_main_lcore();
1892
1893         hif = mrvl_get_hif(rxq->priv, core_id);
1894         if (!hif)
1895                 return -1;
1896
1897         bpool = rxq->priv->bpool;
1898
1899         ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1900         if (ret)
1901                 return ret;
1902
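        /*
         * The bpool cookie carries only the low bits of the mbuf virtual
         * address; the top 24 bits (MRVL_COOKIE_HIGH_ADDR_MASK) are latched
         * once from the first allocated mbuf and must be identical for
         * every buffer handed to the pool (verified in the loop below).
         */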
1903         if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1904                 cookie_addr_high =
1905                         (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1906
1907         for (i = 0; i < num; i++) {
1908                 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1909                         != cookie_addr_high) {
1910                         MRVL_LOG(ERR,
1911                                 "mbuf virtual addr high is out of range "
1912                                 "0x%x instead of 0x%x\n",
1913                                 (uint32_t)((uint64_t)mbufs[i] >> 32),
1914                                 (uint32_t)(cookie_addr_high >> 32));
1915                         goto out;
1916                 }
1917
1918                 entries[i].buff.addr =
1919                         rte_mbuf_data_iova_default(mbufs[i]);
1920                 entries[i].buff.cookie = (uintptr_t)mbufs[i];
1921                 entries[i].bpool = bpool;
1922         }
1923
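        /*
         * pp2_bpool_put_buffs() updates the buffer count in place, so on
         * return i holds the number of buffers actually released and a
         * partial release falls through to the cleanup path below. Note
         * the cast assumes num fits in 16 bits.
         */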
1924         pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1925         mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1926
1927         if (i != num)
1928                 goto out;
1929
1930         return 0;
1931 out:
1932         for (; i < num; i++)
1933                 rte_pktmbuf_free(mbufs[i]);
1934
1935         return -1;
1936 }
1937
1938 /**
1939  * DPDK callback to configure the receive queue.
1940  *
1941  * @param dev
1942  *   Pointer to Ethernet device structure.
1943  * @param idx
1944  *   RX queue index.
1945  * @param desc
1946  *   Number of descriptors to configure in queue.
1947  * @param socket
1948  *   NUMA socket on which memory must be allocated.
1949  * @param conf
1950  *   Thresholds parameters.
1951  * @param mp
1952  *   Memory pool for buffer allocations.
1953  *
1954  * @return
1955  *   0 on success, negative error value otherwise.
1956  */
1957 static int
1958 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1959                     unsigned int socket,
1960                     const struct rte_eth_rxconf *conf,
1961                     struct rte_mempool *mp)
1962 {
1963         struct mrvl_priv *priv = dev->data->dev_private;
1964         struct mrvl_rxq *rxq;
1965         uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
1966         uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1967         int ret, tc, inq;
1968         uint64_t offloads;
1969
1970         offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1971
1972         if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1973                 /*
1974                  * Unknown TC mapping; the queue cannot be mapped correctly.
1975                  */
1976                 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1977                         idx, priv->ppio_id);
1978                 return -EFAULT;
1979         }
1980
1981         frame_size = buf_size - RTE_PKTMBUF_HEADROOM -
1982                      MRVL_PKT_EFFEC_OFFS + RTE_ETHER_CRC_LEN;
1983         if (frame_size < max_rx_pkt_len) {
1984                 MRVL_LOG(WARNING,
1985                         "Mbuf size must be increased to %u bytes to hold up "
1986                         "to %u bytes of data.",
1987                         buf_size + max_rx_pkt_len - frame_size,
1988                         max_rx_pkt_len);
1989                 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1990                 MRVL_LOG(INFO, "Setting max rx pkt len to %u",
1991                         dev->data->dev_conf.rxmode.max_rx_pkt_len);
1992         }
1993
1994         if (dev->data->rx_queues[idx]) {
1995                 rte_free(dev->data->rx_queues[idx]);
1996                 dev->data->rx_queues[idx] = NULL;
1997         }
1998
1999         rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
2000         if (!rxq)
2001                 return -ENOMEM;
2002
2003         rxq->priv = priv;
2004         rxq->mp = mp;
2005         rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
2006         rxq->queue_id = idx;
2007         rxq->port_id = dev->data->port_id;
2008         mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
2009
2010         tc = priv->rxq_map[rxq->queue_id].tc;
2011         inq = priv->rxq_map[rxq->queue_id].inq;
2012         priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
2013                 desc;
2014
2015         ret = mrvl_fill_bpool(rxq, desc);
2016         if (ret) {
2017                 rte_free(rxq);
2018                 return ret;
2019         }
2020
2021         priv->bpool_init_size += desc;
2022
2023         dev->data->rx_queues[idx] = rxq;
2024
2025         return 0;
2026 }
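
/*
 * Illustrative sketch: a typical setup against this callback; the mempool
 * name and sizes are assumptions, and the descriptor count has to respect
 * the rx_desc_lim advertised by mrvl_dev_infos_get().
 *
 *   struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 8192, 256,
 *           0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
 *
 *   ret = rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
 *                                NULL, mp);
 */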
2027
2028 /**
2029  * DPDK callback to release the receive queue.
2030  *
2031  * @param rxq
2032  *   Generic receive queue pointer.
2033  */
2034 static void
2035 mrvl_rx_queue_release(void *rxq)
2036 {
2037         struct mrvl_rxq *q = rxq;
2038         struct pp2_ppio_tc_params *tc_params;
2039         int i, num, tc, inq;
2040         struct pp2_hif *hif;
2041         unsigned int core_id = rte_lcore_id();
2042
2043         if (core_id == LCORE_ID_ANY)
2044                 core_id = rte_get_main_lcore();
2045
2046         if (!q)
2047                 return;
2048
2049         hif = mrvl_get_hif(q->priv, core_id);
2050
2051         if (!hif)
2052                 return;
2053
2054         tc = q->priv->rxq_map[q->queue_id].tc;
2055         inq = q->priv->rxq_map[q->queue_id].inq;
2056         tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
2057         num = tc_params->inqs_params[inq].size;
2058         for (i = 0; i < num; i++) {
2059                 struct pp2_buff_inf inf;
2060                 uint64_t addr;
2061
2062                 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
2063                 addr = cookie_addr_high | inf.cookie;
2064                 rte_pktmbuf_free((struct rte_mbuf *)addr);
2065         }
2066
2067         rte_free(q);
2068 }
2069
2070 /**
2071  * DPDK callback to configure the transmit queue.
2072  *
2073  * @param dev
2074  *   Pointer to Ethernet device structure.
2075  * @param idx
2076  *   Transmit queue index.
2077  * @param desc
2078  *   Number of descriptors to configure in the queue.
2079  * @param socket
2080  *   NUMA socket on which memory must be allocated.
2081  * @param conf
2082  *   Tx queue configuration parameters.
2083  *
2084  * @return
2085  *   0 on success, negative error value otherwise.
2086  */
2087 static int
2088 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
2089                     unsigned int socket,
2090                     const struct rte_eth_txconf *conf)
2091 {
2092         struct mrvl_priv *priv = dev->data->dev_private;
2093         struct mrvl_txq *txq;
2094
2095         if (dev->data->tx_queues[idx]) {
2096                 rte_free(dev->data->tx_queues[idx]);
2097                 dev->data->tx_queues[idx] = NULL;
2098         }
2099
2100         txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
2101         if (!txq)
2102                 return -ENOMEM;
2103
2104         txq->priv = priv;
2105         txq->queue_id = idx;
2106         txq->port_id = dev->data->port_id;
2107         txq->tx_deferred_start = conf->tx_deferred_start;
2108         dev->data->tx_queues[idx] = txq;
2109
2110         priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
2111
2112         return 0;
2113 }
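
/*
 * Illustrative sketch: with tx_deferred_start set, the queue is skipped by
 * rte_eth_dev_start() and must be started explicitly via the generic API
 * (served by mrvl_tx_queue_start() in this driver):
 *
 *   struct rte_eth_txconf txconf = { .tx_deferred_start = 1 };
 *
 *   rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &txconf);
 *   rte_eth_dev_start(port_id);
 *   rte_eth_dev_tx_queue_start(port_id, 0);
 */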
2114
2115 /**
2116  * DPDK callback to release the transmit queue.
2117  *
2118  * @param txq
2119  *   Generic transmit queue pointer.
2120  */
2121 static void
2122 mrvl_tx_queue_release(void *txq)
2123 {
2124         struct mrvl_txq *q = txq;
2125
2126         if (!q)
2127                 return;
2128
2129         rte_free(q);
2130 }
2131
2132 /**
2133  * DPDK callback to get flow control configuration.
2134  *
2135  * @param dev
2136  *  Pointer to Ethernet device structure.
2137  * @param fc_conf
2138  *  Pointer to the flow control configuration.
2139  *
2140  * @return
2141  *  0 on success, negative error value otherwise.
2142  */
2143 static int
2144 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2145 {
2146         struct mrvl_priv *priv = dev->data->dev_private;
2147         int ret, en;
2148
2149         if (!priv)
2150                 return -EPERM;
2151
2152         fc_conf->autoneg = 1;
2153         ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
2154         if (ret) {
2155                 MRVL_LOG(ERR, "Failed to read rx pause state");
2156                 return ret;
2157         }
2158
2159         fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
2160
2161         ret = pp2_ppio_get_tx_pause(priv->ppio, &en);
2162         if (ret) {
2163                 MRVL_LOG(ERR, "Failed to read tx pause state");
2164                 return ret;
2165         }
2166
2167         if (en) {
2168                 if (fc_conf->mode == RTE_FC_NONE)
2169                         fc_conf->mode = RTE_FC_TX_PAUSE;
2170                 else
2171                         fc_conf->mode = RTE_FC_FULL;
2172         }
2173
2174         return 0;
2175 }
2176
2177 /**
2178  * DPDK callback to set flow control configuration.
2179  *
2180  * @param dev
2181  *  Pointer to Ethernet device structure.
2182  * @param fc_conf
2183  *  Pointer to the flow control configuration.
2184  *
2185  * @return
2186  *  0 on success, negative error value otherwise.
2187  */
2188 static int
2189 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
2190 {
2191         struct mrvl_priv *priv = dev->data->dev_private;
2192         struct pp2_ppio_tx_pause_params mrvl_pause_params;
2193         int ret;
2194         int rx_en, tx_en;
2195
2196         if (!priv)
2197                 return -EPERM;
2198
2199         if (fc_conf->high_water ||
2200             fc_conf->low_water ||
2201             fc_conf->pause_time ||
2202             fc_conf->mac_ctrl_frame_fwd) {
2203                 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
2204
2205                 return -EINVAL;
2206         }
2207
2208         if (fc_conf->autoneg == 0) {
2209                 MRVL_LOG(ERR, "Flowctrl Autoneg disable is not supported");
2210                 return -EINVAL;
2211         }
2212
2213         switch (fc_conf->mode) {
2214         case RTE_FC_FULL:
2215                 rx_en = 1;
2216                 tx_en = 1;
2217                 break;
2218         case RTE_FC_TX_PAUSE:
2219                 rx_en = 0;
2220                 tx_en = 1;
2221                 break;
2222         case RTE_FC_RX_PAUSE:
2223                 rx_en = 1;
2224                 tx_en = 0;
2225                 break;
2226         case RTE_FC_NONE:
2227                 rx_en = 0;
2228                 tx_en = 0;
2229                 break;
2230         default:
2231                 MRVL_LOG(ERR, "Incorrect Flow control flag (%d)",
2232                          fc_conf->mode);
2233                 return -EINVAL;
2234         }
2235
2236         /* Set RX flow control */
2237         ret = pp2_ppio_set_rx_pause(priv->ppio, rx_en);
2238         if (ret) {
2239                 MRVL_LOG(ERR, "Failed to change RX flowctrl");
2240                 return ret;
2241         }
2242
2243         /* Set TX flow control */
2244         mrvl_pause_params.en = tx_en;
2245         /* all inqs participate in xon/xoff decision */
2246         mrvl_pause_params.use_tc_pause_inqs = 0;
2247         ret = pp2_ppio_set_tx_pause(priv->ppio, &mrvl_pause_params);
2248         if (ret) {
2249                 MRVL_LOG(ERR, "Failed to change TX flowctrl");
2250                 return ret;
2251         }
2252
2253         return 0;
2254 }
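
/*
 * Illustrative sketch: given the restrictions above (autoneg must stay
 * enabled, thresholds and MAC control frame forwarding unsupported), a
 * valid application request looks like:
 *
 *   struct rte_eth_fc_conf fc = {
 *           .mode = RTE_FC_FULL,
 *           .autoneg = 1,
 *   };
 *
 *   ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
 */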
2255
2256 /**
2257  * Update RSS hash configuration
2258  *
2259  * @param dev
2260  *   Pointer to Ethernet device structure.
2261  * @param rss_conf
2262  *   Pointer to RSS configuration.
2263  *
2264  * @return
2265  *   0 on success, negative error value otherwise.
2266  */
2267 static int
2268 mrvl_rss_hash_update(struct rte_eth_dev *dev,
2269                      struct rte_eth_rss_conf *rss_conf)
2270 {
2271         struct mrvl_priv *priv = dev->data->dev_private;
2272
2273         if (priv->isolated)
2274                 return -ENOTSUP;
2275
2276         return mrvl_configure_rss(priv, rss_conf);
2277 }
2278
2279 /**
2280  * DPDK callback to get RSS hash configuration.
2281  *
2282  * @param dev
2283  *   Pointer to Ethernet device structure.
2284  * @param rss_conf
2285  *   Pointer to RSS configuration.
2286  *
2287  * @return
2288  *   Always 0.
2289  */
2290 static int
2291 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
2292                        struct rte_eth_rss_conf *rss_conf)
2293 {
2294         struct mrvl_priv *priv = dev->data->dev_private;
2295         enum pp2_ppio_hash_type hash_type =
2296                 priv->ppio_params.inqs_params.hash_type;
2297
2298         rss_conf->rss_key = NULL;
2299
2300         if (hash_type == PP2_PPIO_HASH_T_NONE)
2301                 rss_conf->rss_hf = 0;
2302         else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
2303                 rss_conf->rss_hf = ETH_RSS_IPV4;
2304         else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
2305                 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
2306         else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
2307                 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
2308
2309         return 0;
2310 }
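
/*
 * Illustrative sketch: the driver maps rss_hf onto a single MUSDK hash
 * type (see mrvl_rss_hash_conf_get() above), so only one of the flow types
 * advertised in mrvl_dev_infos_get() is effective at a time, e.g.:
 *
 *   struct rte_eth_rss_conf rss = { .rss_hf = ETH_RSS_NONFRAG_IPV4_TCP };
 *
 *   ret = rte_eth_dev_rss_hash_update(port_id, &rss);
 */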
2311
2312 /**
2313  * DPDK callback to get rte_flow callbacks.
2314  *
2315  * @param dev
2316  *   Pointer to the device structure.
2317  * @param filter_type
2318  *   Flow filter type.
2319  * @param filter_op
2320  *   Flow filter operation.
2321  * @param arg
2322  *   Pointer to pass the flow ops.
2323  *
2324  * @return
2325  *   0 on success, negative error value otherwise.
2326  */
2327 static int
2328 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
2329                      enum rte_filter_type filter_type,
2330                      enum rte_filter_op filter_op, void *arg)
2331 {
2332         switch (filter_type) {
2333         case RTE_ETH_FILTER_GENERIC:
2334                 if (filter_op != RTE_ETH_FILTER_GET)
2335                         return -EINVAL;
2336                 *(const void **)arg = &mrvl_flow_ops;
2337                 return 0;
2338         default:
2339                 MRVL_LOG(WARNING, "Filter type (%d) not supported",
2340                                 filter_type);
2341                 return -EINVAL;
2342         }
2343 }
2344
2345 /**
2346  * DPDK callback to get rte_mtr callbacks.
2347  *
2348  * @param dev
2349  *   Pointer to the device structure.
2350  * @param ops
2351  *   Pointer to pass the mtr ops.
2352  *
2353  * @return
2354  *   Always 0.
2355  */
2356 static int
2357 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2358 {
2359         *(const void **)ops = &mrvl_mtr_ops;
2360
2361         return 0;
2362 }
2363
2364 /**
2365  * DPDK callback to get rte_tm callbacks.
2366  *
2367  * @param dev
2368  *   Pointer to the device structure.
2369  * @param ops
2370  *   Pointer to pass the tm ops.
2371  *
2372  * @return
2373  *   Always 0.
2374  */
2375 static int
2376 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
2377 {
2378         *(const void **)ops = &mrvl_tm_ops;
2379
2380         return 0;
2381 }
2382
2383 static const struct eth_dev_ops mrvl_ops = {
2384         .dev_configure = mrvl_dev_configure,
2385         .dev_start = mrvl_dev_start,
2386         .dev_stop = mrvl_dev_stop,
2387         .dev_set_link_up = mrvl_dev_set_link_up,
2388         .dev_set_link_down = mrvl_dev_set_link_down,
2389         .dev_close = mrvl_dev_close,
2390         .link_update = mrvl_link_update,
2391         .promiscuous_enable = mrvl_promiscuous_enable,
2392         .allmulticast_enable = mrvl_allmulticast_enable,
2393         .promiscuous_disable = mrvl_promiscuous_disable,
2394         .allmulticast_disable = mrvl_allmulticast_disable,
2395         .mac_addr_remove = mrvl_mac_addr_remove,
2396         .mac_addr_add = mrvl_mac_addr_add,
2397         .mac_addr_set = mrvl_mac_addr_set,
2398         .mtu_set = mrvl_mtu_set,
2399         .stats_get = mrvl_stats_get,
2400         .stats_reset = mrvl_stats_reset,
2401         .xstats_get = mrvl_xstats_get,
2402         .xstats_reset = mrvl_xstats_reset,
2403         .xstats_get_names = mrvl_xstats_get_names,
2404         .dev_infos_get = mrvl_dev_infos_get,
2405         .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
2406         .rxq_info_get = mrvl_rxq_info_get,
2407         .txq_info_get = mrvl_txq_info_get,
2408         .vlan_filter_set = mrvl_vlan_filter_set,
2409         .vlan_offload_set = mrvl_vlan_offload_set,
2410         .tx_queue_start = mrvl_tx_queue_start,
2411         .tx_queue_stop = mrvl_tx_queue_stop,
2412         .rx_queue_setup = mrvl_rx_queue_setup,
2413         .rx_queue_release = mrvl_rx_queue_release,
2414         .tx_queue_setup = mrvl_tx_queue_setup,
2415         .tx_queue_release = mrvl_tx_queue_release,
2416         .flow_ctrl_get = mrvl_flow_ctrl_get,
2417         .flow_ctrl_set = mrvl_flow_ctrl_set,
2418         .rss_hash_update = mrvl_rss_hash_update,
2419         .rss_hash_conf_get = mrvl_rss_hash_conf_get,
2420         .filter_ctrl = mrvl_eth_filter_ctrl,
2421         .mtr_ops_get = mrvl_mtr_ops_get,
2422         .tm_ops_get = mrvl_tm_ops_get,
2423 };
2424
2425 /**
2426  * Return packet type information and l3/l4 offsets.
2427  *
2428  * @param desc
2429  *   Pointer to the received packet descriptor.
2430  * @param l3_offset
2431  *   l3 packet offset.
2432  * @param l4_offset
2433  *   l4 packet offset.
2434  *
2435  * @return
2436  *   Packet type information.
2437  */
2438 static inline uint64_t
2439 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
2440                                     uint8_t *l3_offset, uint8_t *l4_offset)
2441 {
2442         enum pp2_inq_l3_type l3_type;
2443         enum pp2_inq_l4_type l4_type;
2444         enum pp2_inq_vlan_tag vlan_tag;
2445         uint64_t packet_type;
2446
2447         pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
2448         pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
2449         pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
2450
2451         packet_type = RTE_PTYPE_L2_ETHER;
2452
2453         switch (vlan_tag) {
2454         case PP2_INQ_VLAN_TAG_SINGLE:
2455                 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
2456                 break;
2457         case PP2_INQ_VLAN_TAG_DOUBLE:
2458         case PP2_INQ_VLAN_TAG_TRIPLE:
2459                 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
2460                 break;
2461         default:
2462                 break;
2463         }
2464
2465         switch (l3_type) {
2466         case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
2467                 packet_type |= RTE_PTYPE_L3_IPV4;
2468                 break;
2469         case PP2_INQ_L3_TYPE_IPV4_OK:
2470                 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2471                 break;
2472         case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2473                 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2474                 break;
2475         case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2476                 packet_type |= RTE_PTYPE_L3_IPV6;
2477                 break;
2478         case PP2_INQ_L3_TYPE_IPV6_EXT:
2479                 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2480                 break;
2481         case PP2_INQ_L3_TYPE_ARP:
2482                 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2483                 /*
2484                  * In case of ARP the l4_offset is set to a wrong value.
2485                  * Set it to a proper one so that later on mbuf->l3_len can
2486                  * be calculated by subtracting l3_offset from l4_offset.
2487                  */
2488                 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2489                 break;
2490         default:
2491                 break;
2492         }
2493
2494         switch (l4_type) {
2495         case PP2_INQ_L4_TYPE_TCP:
2496                 packet_type |= RTE_PTYPE_L4_TCP;
2497                 break;
2498         case PP2_INQ_L4_TYPE_UDP:
2499                 packet_type |= RTE_PTYPE_L4_UDP;
2500                 break;
2501         default:
2502                 break;
2503         }
2504
2505         return packet_type;
2506 }
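
/*
 * Worked example: for a plain untagged IPv4/TCP frame the classifier
 * reports l3_offset = 14 (Ethernet header) and l4_offset = 34, so the rx
 * path derives mbuf->l2_len = 14 and mbuf->l3_len = 34 - 14 = 20, the
 * size of an IPv4 header without options.
 */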
2507
2508 /**
2509  * Get offload information from the received packet descriptor.
2510  *
2511  * @param desc
2512  *   Pointer to the received packet descriptor.
2513  *
2514  * @return
2515  *   Mbuf offload flags.
2516  */
2517 static inline uint64_t
2518 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2519 {
2520         uint64_t flags;
2521         enum pp2_inq_desc_status status;
2522
2523         status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2524         if (unlikely(status != PP2_DESC_ERR_OK))
2525                 flags = PKT_RX_IP_CKSUM_BAD;
2526         else
2527                 flags = PKT_RX_IP_CKSUM_GOOD;
2528
2529         status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2530         if (unlikely(status != PP2_DESC_ERR_OK))
2531                 flags |= PKT_RX_L4_CKSUM_BAD;
2532         else
2533                 flags |= PKT_RX_L4_CKSUM_GOOD;
2534
2535         return flags;
2536 }
2537
2538 /**
2539  * DPDK callback for receive.
2540  *
2541  * @param rxq
2542  *   Generic pointer to the receive queue.
2543  * @param rx_pkts
2544  *   Array to store received packets.
2545  * @param nb_pkts
2546  *   Maximum number of packets in array.
2547  *
2548  * @return
2549  *   Number of packets successfully received.
2550  */
2551 static uint16_t
2552 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2553 {
2554         struct mrvl_rxq *q = rxq;
2555         struct pp2_ppio_desc descs[nb_pkts];
2556         struct pp2_bpool *bpool;
2557         int i, ret, rx_done = 0;
2558         int num;
2559         struct pp2_hif *hif;
2560         unsigned int core_id = rte_lcore_id();
2561
2562         hif = mrvl_get_hif(q->priv, core_id);
2563
2564         if (unlikely(!q->priv->ppio || !hif))
2565                 return 0;
2566
2567         bpool = q->priv->bpool;
2568
2569         ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2570                             q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2571         if (unlikely(ret < 0))
2572                 return 0;
2573
2574         mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2575
2576         for (i = 0; i < nb_pkts; i++) {
2577                 struct rte_mbuf *mbuf;
2578                 uint8_t l3_offset, l4_offset;
2579                 enum pp2_inq_desc_status status;
2580                 uint64_t addr;
2581
2582                 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2583                         struct pp2_ppio_desc *pref_desc;
2584                         u64 pref_addr;
2585
2586                         pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2587                         pref_addr = cookie_addr_high |
2588                                     pp2_ppio_inq_desc_get_cookie(pref_desc);
2589                         rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2590                         rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2591                 }
2592
2593                 addr = cookie_addr_high |
2594                        pp2_ppio_inq_desc_get_cookie(&descs[i]);
2595                 mbuf = (struct rte_mbuf *)addr;
2596                 rte_pktmbuf_reset(mbuf);
2597
2598                 /* drop packet in case of mac, overrun or resource error */
2599                 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2600                 if (unlikely(status != PP2_DESC_ERR_OK)) {
2601                         struct pp2_buff_inf binf = {
2602                                 .addr = rte_mbuf_data_iova_default(mbuf),
2603                                 .cookie = (uint64_t)mbuf,
2604                         };
2605
2606                         pp2_bpool_put_buff(hif, bpool, &binf);
2607                         mrvl_port_bpool_size
2608                                 [bpool->pp2_id][bpool->id][core_id]++;
2609                         q->drop_mac++;
2610                         continue;
2611                 }
2612
2613                 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2614                 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2615                 mbuf->data_len = mbuf->pkt_len;
2616                 mbuf->port = q->port_id;
2617                 mbuf->packet_type =
2618                         mrvl_desc_to_packet_type_and_offset(&descs[i],
2619                                                             &l3_offset,
2620                                                             &l4_offset);
2621                 mbuf->l2_len = l3_offset;
2622                 mbuf->l3_len = l4_offset - l3_offset;
2623
2624                 if (likely(q->cksum_enabled))
2625                         mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
2626
2627                 rx_pkts[rx_done++] = mbuf;
2628                 q->bytes_recv += mbuf->pkt_len;
2629         }
2630
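        /*
         * Opportunistic bpool maintenance under the port lock: refill by
         * MRVL_BURST_SIZE when the pool drops below its minimum (or when
         * nothing was received while below the initial fill level), and
         * drain mbufs back to the mempool once it exceeds the maximum.
         */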
2631         if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2632                 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
2633
2634                 if (unlikely(num <= q->priv->bpool_min_size ||
2635                              (!rx_done && num < q->priv->bpool_init_size))) {
2636                         mrvl_fill_bpool(q, MRVL_BURST_SIZE);
2637                 } else if (unlikely(num > q->priv->bpool_max_size)) {
2638                         int i;
2639                         int pkt_to_remove = num - q->priv->bpool_init_size;
2640                         struct rte_mbuf *mbuf;
2641                         struct pp2_buff_inf buff;
2642
2643                         for (i = 0; i < pkt_to_remove; i++) {
2644                                 ret = pp2_bpool_get_buff(hif, bpool, &buff);
2645                                 if (ret)
2646                                         break;
2647                                 mbuf = (struct rte_mbuf *)
2648                                         (cookie_addr_high | buff.cookie);
2649                                 rte_pktmbuf_free(mbuf);
2650                         }
2651                         mrvl_port_bpool_size
2652                                 [bpool->pp2_id][bpool->id][core_id] -= i;
2653                 }
2654                 rte_spinlock_unlock(&q->priv->lock);
2655         }
2656
2657         return rx_done;
2658 }
2659
2660 /**
2661  * Prepare offload information.
2662  *
2663  * @param ol_flags
2664  *   Offload flags.
2665  * @param l3_type
2666  *   Pointer to the pp2_outq_l3_type enum to be filled.
2667  * @param l4_type
2668  *   Pointer to the pp2_outq_l4_type enum to be filled.
2669  * @param gen_l3_cksum
2670  *   Will be set to 1 in case l3 checksum is computed.
2671  * @param gen_l4_cksum
2672  *   Will be set to 1 in case l4 checksum is computed.
2673  */
2674 static inline void
2675 mrvl_prepare_proto_info(uint64_t ol_flags,
2676                         enum pp2_outq_l3_type *l3_type,
2677                         enum pp2_outq_l4_type *l4_type,
2678                         int *gen_l3_cksum,
2679                         int *gen_l4_cksum)
2680 {
2681         /*
2682          * Based on ol_flags, prepare the information for
2683          * pp2_ppio_outq_desc_set_proto_info(), which sets up the
2684          * descriptor for offloading.
2685          * In most of the checksum cases IPv4 must be set, so it is
2686          * used as the default value.
2687          */
2688         *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2689         *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2690
2691         if (ol_flags & PKT_TX_IPV6) {
2692                 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2693                 /* no checksum for ipv6 header */
2694                 *gen_l3_cksum = 0;
2695         }
2696
2697         if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_TCP_CKSUM) {
2698                 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2699                 *gen_l4_cksum = 1;
2700         } else if ((ol_flags & PKT_TX_L4_MASK) == PKT_TX_UDP_CKSUM) {
2701                 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2702                 *gen_l4_cksum = 1;
2703         } else {
2704                 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2705                 /* no checksum for other type */
2706                 *gen_l4_cksum = 0;
2707         }
2708 }
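
/*
 * Illustrative sketch: for the logic above to generate checksums, the
 * application has to request them per mbuf before calling the tx burst,
 * e.g. for an IPv4/TCP packet:
 *
 *   mbuf->l2_len = sizeof(struct rte_ether_hdr);
 *   mbuf->l3_len = sizeof(struct rte_ipv4_hdr);
 *   mbuf->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 */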
2709
2710 /**
2711  * Release already sent buffers to bpool (buffer-pool).
2712  *
2713  * @param ppio
2714  *   Pointer to the port structure.
2715  * @param hif
2716  *   Pointer to the MUSDK hardware interface.
 * @param core_id
 *   Id of the lcore that owns the shadow queue.
2717  * @param sq
2718  *   Pointer to the shadow queue.
2719  * @param qid
2720  *   Queue id number.
2721  * @param force
2722  *   Force releasing packets.
2723  */
2724 static inline void
2725 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2726                        unsigned int core_id, struct mrvl_shadow_txq *sq,
2727                        int qid, int force)
2728 {
2729         struct buff_release_entry *entry;
2730         uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2731         int i;
2732
2733         pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2734
2735         sq->num_to_release += nb_done;
2736
2737         if (likely(!force &&
2738                    sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2739                 return;
2740
2741         nb_done = sq->num_to_release;
2742         sq->num_to_release = 0;
2743
2744         for (i = 0; i < nb_done; i++) {
2745                 entry = &sq->ent[sq->tail + num];
2746                 if (unlikely(!entry->buff.addr)) {
2747                         MRVL_LOG(ERR,
2748                                 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2749                                 sq->tail, (u64)entry->buff.cookie,
2750                                 (u64)entry->buff.addr);
2751                         skip_bufs = 1;
2752                         goto skip;
2753                 }
2754
2755                 if (unlikely(!entry->bpool)) {
2756                         struct rte_mbuf *mbuf;
2757
2758                         mbuf = (struct rte_mbuf *)entry->buff.cookie;
2759                         rte_pktmbuf_free(mbuf);
2760                         skip_bufs = 1;
2761                         goto skip;
2762                 }
2763
2764                 mrvl_port_bpool_size
2765                         [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2766                 num++;
2767                 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2768                         goto skip;
2769                 continue;
2770 skip:
2771                 if (likely(num))
2772                         pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2773                 num += skip_bufs;
2774                 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2775                 sq->size -= num;
2776                 num = 0;
2777                 skip_bufs = 0;
2778         }
2779
2780         if (likely(num)) {
2781                 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2782                 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2783                 sq->size -= num;
2784         }
2785 }
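
/*
 * Note: the tail arithmetic above assumes MRVL_PP2_TX_SHADOWQ_SIZE is a
 * power of two, so "(sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK" wraps the
 * ring index without a division.
 */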
2786
2787 /**
2788  * DPDK callback for transmit.
2789  *
2790  * @param txq
2791  *   Generic pointer to the transmit queue.
2792  * @param tx_pkts
2793  *   Packets to transmit.
2794  * @param nb_pkts
2795  *   Number of packets in array.
2796  *
2797  * @return
2798  *   Number of packets successfully transmitted.
2799  */
2800 static uint16_t
2801 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2802 {
2803         struct mrvl_txq *q = txq;
2804         struct mrvl_shadow_txq *sq;
2805         struct pp2_hif *hif;
2806         struct pp2_ppio_desc descs[nb_pkts];
2807         unsigned int core_id = rte_lcore_id();
2808         int i, bytes_sent = 0;
2809         uint16_t num, sq_free_size;
2810         uint64_t addr;
2811
2812         hif = mrvl_get_hif(q->priv, core_id);
2813         sq = &q->shadow_txqs[core_id];
2814
2815         if (unlikely(!q->priv->ppio || !hif))
2816                 return 0;
2817
2818         if (sq->size)
2819                 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2820                                        sq, q->queue_id, 0);
2821
2822         sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2823         if (unlikely(nb_pkts > sq_free_size))
2824                 nb_pkts = sq_free_size;
2825
2826         for (i = 0; i < nb_pkts; i++) {
2827                 struct rte_mbuf *mbuf = tx_pkts[i];
2828                 int gen_l3_cksum, gen_l4_cksum;
2829                 enum pp2_outq_l3_type l3_type;
2830                 enum pp2_outq_l4_type l4_type;
2831
2832                 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2833                         struct rte_mbuf *pref_pkt_hdr;
2834
2835                         pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2836                         rte_mbuf_prefetch_part1(pref_pkt_hdr);
2837                         rte_mbuf_prefetch_part2(pref_pkt_hdr);
2838                 }
2839
2840                 mrvl_fill_shadowq(sq, mbuf);
2841                 mrvl_fill_desc(&descs[i], mbuf);
2842
2843                 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2844                 /*
2845                  * in case unsupported ol_flags were passed
2846                  * do not update descriptor offload information
2847                  */
2848                 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
2849                         continue;
2850                 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
2851                                         &gen_l3_cksum, &gen_l4_cksum);
2852
2853                 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2854                                                   mbuf->l2_len,
2855                                                   mbuf->l2_len + mbuf->l3_len,
2856                                                   gen_l3_cksum, gen_l4_cksum);
2857         }
2858
2859         num = nb_pkts;
2860         pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2861         /* number of packets that were not sent */
2862         if (unlikely(num > nb_pkts)) {
2863                 for (i = nb_pkts; i < num; i++) {
2864                         sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2865                                 MRVL_PP2_TX_SHADOWQ_MASK;
2866                         addr = sq->ent[sq->head].buff.cookie;
2867                         bytes_sent -=
2868                                 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2869                 }
2870                 sq->size -= num - nb_pkts;
2871         }
2872
2873         q->bytes_sent += bytes_sent;
2874
2875         return nb_pkts;
2876 }
2877
2878 /**
 * DPDK callback for S/G transmit.
2879  *
2880  * @param txq
2881  *   Generic pointer transmit queue.
2882  * @param tx_pkts
2883  *   Packets to transmit.
2884  * @param nb_pkts
2885  *   Number of packets in array.
2886  *
2887  * @return
2888  *   Number of packets successfully transmitted.
2889  */
2890 static uint16_t
2891 mrvl_tx_sg_pkt_burst(void *txq, struct rte_mbuf **tx_pkts,
2892                      uint16_t nb_pkts)
2893 {
2894         struct mrvl_txq *q = txq;
2895         struct mrvl_shadow_txq *sq;
2896         struct pp2_hif *hif;
2897         struct pp2_ppio_desc descs[nb_pkts * PP2_PPIO_DESC_NUM_FRAGS];
2898         struct pp2_ppio_sg_pkts pkts;
2899         uint8_t frags[nb_pkts];
2900         unsigned int core_id = rte_lcore_id();
2901         int i, j, bytes_sent = 0;
2902         int tail, tail_first;
2903         uint16_t num, sq_free_size;
2904         uint16_t nb_segs, total_descs = 0;
2905         uint64_t addr;
2906
2907         hif = mrvl_get_hif(q->priv, core_id);
2908         sq = &q->shadow_txqs[core_id];
2909         pkts.frags = frags;
2910         pkts.num = 0;
2911
2912         if (unlikely(!q->priv->ppio || !hif))
2913                 return 0;
2914
2915         if (sq->size)
2916                 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2917                                        sq, q->queue_id, 0);
2918
2919         /* Save shadow queue free size */
2920         sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2921
2922         tail = 0;
2923         for (i = 0; i < nb_pkts; i++) {
2924                 struct rte_mbuf *mbuf = tx_pkts[i];
2925                 struct rte_mbuf *seg = NULL;
2926                 int gen_l3_cksum, gen_l4_cksum;
2927                 enum pp2_outq_l3_type l3_type;
2928                 enum pp2_outq_l4_type l4_type;
2929
2930                 nb_segs = mbuf->nb_segs;
2931                 tail_first = tail;
2932                 total_descs += nb_segs;
2933
2934                 /*
2935                  * Check if total_descs does not exceed
2936                  * shadow queue free size
2937                  */
2938                 if (unlikely(total_descs > sq_free_size)) {
2939                         total_descs -= nb_segs;
2940                         break;
2941                 }
2942
2943                 /* Check if nb_segs does not exceed the max nb of desc per
2944                  * fragmented packet
2945                  */
2946                 if (nb_segs > PP2_PPIO_DESC_NUM_FRAGS) {
2947                         total_descs -= nb_segs;
2948                         MRVL_LOG(ERR,
2949                                 "Too many segments. Packet won't be sent.");
2950                         break;
2951                 }
2952
2953                 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2954                         struct rte_mbuf *pref_pkt_hdr;
2955
2956                         pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2957                         rte_mbuf_prefetch_part1(pref_pkt_hdr);
2958                         rte_mbuf_prefetch_part2(pref_pkt_hdr);
2959                 }
2960
2961                 pkts.frags[pkts.num] = nb_segs;
2962                 pkts.num++;
2963
2964                 seg = mbuf;
2965                 for (j = 0; j < nb_segs - 1; j++) {
2966                         /* For the subsequent segments, set shadow queue
2967                          * buffer to NULL
2968                          */
2969                         mrvl_fill_shadowq(sq, NULL);
2970                         mrvl_fill_desc(&descs[tail], seg);
2971
2972                         tail++;
2973                         seg = seg->next;
2974                 }
2975                 /* Put first mbuf info in last shadow queue entry */
2976                 mrvl_fill_shadowq(sq, mbuf);
2977                 /* Update descriptor with last segment */
2978                 mrvl_fill_desc(&descs[tail++], seg);
2979
2980                 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2981                 /* In case unsupported ol_flags were passed
2982                  * do not update descriptor offload information
2983                  */
2984                 if (!(mbuf->ol_flags & MRVL_TX_PKT_OFFLOADS))
2985                         continue;
2986                 mrvl_prepare_proto_info(mbuf->ol_flags, &l3_type, &l4_type,
2987                                         &gen_l3_cksum, &gen_l4_cksum);
2988
2989                 pp2_ppio_outq_desc_set_proto_info(&descs[tail_first], l3_type,
2990                                                   l4_type, mbuf->l2_len,
2991                                                   mbuf->l2_len + mbuf->l3_len,
2992                                                   gen_l3_cksum, gen_l4_cksum);
2993         }
2994
2995         num = total_descs;
2996         pp2_ppio_send_sg(q->priv->ppio, hif, q->queue_id, descs,
2997                          &total_descs, &pkts);
2998         /* number of packets that were not sent */
2999         if (unlikely(num > total_descs)) {
3000                 for (i = total_descs; i < num; i++) {
3001                         sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
3002                                 MRVL_PP2_TX_SHADOWQ_MASK;
3003
3004                         addr = sq->ent[sq->head].buff.cookie;
3005                         if (addr)
3006                                 bytes_sent -=
3007                                         rte_pktmbuf_pkt_len((struct rte_mbuf *)
3008                                                 (cookie_addr_high | addr));
3009                 }
3010                 sq->size -= num - total_descs;
3011                 nb_pkts = pkts.num;
3012         }
3013
3014         q->bytes_sent += bytes_sent;
3015
3016         return nb_pkts;
3017 }
3018
3019 /**
3020  * Create private device structure.
3021  *
3022  * @param dev_name
3023  *   Pointer to the port name passed in the initialization parameters.
3024  *
3025  * @return
3026  *   Pointer to the newly allocated private device structure.
3027  */
3028 static struct mrvl_priv *
3029 mrvl_priv_create(const char *dev_name)
3030 {
3031         struct pp2_bpool_params bpool_params;
3032         char match[MRVL_MATCH_LEN];
3033         struct mrvl_priv *priv;
3034         uint16_t max_frame_size;
3035         int ret, bpool_bit;
3036
3037         priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
3038         if (!priv)
3039                 return NULL;
3040
3041         ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
3042                                        &priv->pp_id, &priv->ppio_id);
3043         if (ret)
3044                 goto out_free_priv;
3045
3046         ret = pp2_ppio_get_l4_cksum_max_frame_size(priv->pp_id, priv->ppio_id,
3047                                                    &max_frame_size);
3048         if (ret)
3049                 goto out_free_priv;
3050
3051         priv->max_mtu = max_frame_size + RTE_ETHER_CRC_LEN -
3052                 MRVL_PP2_ETH_HDRS_LEN;
3053
3054         bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
3055                                      PP2_BPOOL_NUM_POOLS);
3056         if (bpool_bit < 0)
3057                 goto out_free_priv;
3058         priv->bpool_bit = bpool_bit;
3059
3060         snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
3061                  priv->bpool_bit);
3062         memset(&bpool_params, 0, sizeof(bpool_params));
3063         bpool_params.match = match;
3064         bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
3065         ret = pp2_bpool_init(&bpool_params, &priv->bpool);
3066         if (ret)
3067                 goto out_clear_bpool_bit;
3068
3069         priv->ppio_params.type = PP2_PPIO_T_NIC;
3070         rte_spinlock_init(&priv->lock);
3071
3072         return priv;
3073 out_clear_bpool_bit:
3074         used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
3075 out_free_priv:
3076         rte_free(priv);
3077         return NULL;
3078 }
3079
3080 /**
3081  * Create device representing Ethernet port.
3082  *
 * @param vdev
 *   Pointer to the backing virtual device.
3083  * @param name
3084  *   Pointer to the port's name.
3085  *
3086  * @return
3087  *   0 on success, negative error value otherwise.
3088  */
3089 static int
3090 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
3091 {
3092         int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
3093         struct rte_eth_dev *eth_dev;
3094         struct mrvl_priv *priv;
3095         struct ifreq req;
3096
3097         eth_dev = rte_eth_dev_allocate(name);
3098         if (!eth_dev)
3099                 return -ENOMEM;
3100
3101         priv = mrvl_priv_create(name);
3102         if (!priv) {
3103                 ret = -ENOMEM;
3104                 goto out_free;
3105         }
3106         eth_dev->data->dev_private = priv;
3107
3108         eth_dev->data->mac_addrs =
3109                 rte_zmalloc("mac_addrs",
3110                             RTE_ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
3111         if (!eth_dev->data->mac_addrs) {
3112                 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
3113                 ret = -ENOMEM;
3114                 goto out_free;
3115         }
3116
3117         memset(&req, 0, sizeof(req));
3118         strlcpy(req.ifr_name, name, sizeof(req.ifr_name));
3119         ret = ioctl(fd, SIOCGIFHWADDR, &req);
3120         if (ret)
3121                 goto out_free;
3122
3123         memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
3124                req.ifr_addr.sa_data, RTE_ETHER_ADDR_LEN);
3125
3126         eth_dev->device = &vdev->device;
3127         eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
3128         mrvl_set_tx_function(eth_dev);
3129         eth_dev->dev_ops = &mrvl_ops;
3130         eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
3131
3132         eth_dev->data->dev_link.link_status = ETH_LINK_UP;
3133
3134         rte_eth_dev_probing_finish(eth_dev);
3135         return 0;
3136 out_free:
3137         rte_eth_dev_release_port(eth_dev);
3138
3139         return ret;
3140 }
3141
3142 /**
3143  * Callback used by rte_kvargs_process() during argument parsing.
3144  *
3145  * @param key
3146  *   Pointer to the parsed key (unused).
3147  * @param value
3148  *   Pointer to the parsed value.
3149  * @param extra_args
3150  *   Pointer to the extra arguments which contains address of the
3151  *   table of pointers to parsed interface names.
3152  *
3153  * @return
3154  *   Always 0.
3155  */
3156 static int
3157 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
3158                  void *extra_args)
3159 {
3160         struct mrvl_ifnames *ifnames = extra_args;
3161
3162         ifnames->names[ifnames->idx++] = value;
3163
3164         return 0;
3165 }
3166
3167 /**
3168  * DPDK callback to register the virtual device.
3169  *
3170  * @param vdev
3171  *   Pointer to the virtual device.
3172  *
3173  * @return
3174  *   0 on success, negative error value otherwise.
3175  */
3176 static int
3177 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
3178 {
3179         struct rte_kvargs *kvlist;
3180         struct mrvl_ifnames ifnames;
3181         int ret = -EINVAL;
3182         uint32_t i, ifnum, cfgnum;
3183         const char *params;
3184
3185         params = rte_vdev_device_args(vdev);
3186         if (!params)
3187                 return -EINVAL;
3188
3189         kvlist = rte_kvargs_parse(params, valid_args);
3190         if (!kvlist)
3191                 return -EINVAL;
3192
3193         ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
3194         if (ifnum > RTE_DIM(ifnames.names))
3195                 goto out_free_kvlist;
3196
3197         ifnames.idx = 0;
3198         rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
3199                            mrvl_get_ifnames, &ifnames);
3200
3201
3202         /*
3203          * The below system initialization should be done only once,
3204          * on the first provided configuration file
3205          */
3206         if (!mrvl_cfg) {
3207                 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
3208                 if (cfgnum > 1) {
3209                         MRVL_LOG(ERR, "Cannot handle more than one config file!");
3210                         goto out_free_kvlist;
3211                 } else if (cfgnum == 1) {
3212                         MRVL_LOG(INFO, "Parsing config file!");
3213                         rte_kvargs_process(kvlist, MRVL_CFG_ARG,
3214                                            mrvl_get_cfg, &mrvl_cfg);
3215                 }
3216         }
3217
3218         if (mrvl_dev_num)
3219                 goto init_devices;
3220
3221         MRVL_LOG(INFO, "Perform MUSDK initializations");
3222
3223         ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
3224         if (ret)
3225                 goto out_free_kvlist;
3226
3227         ret = mrvl_init_pp2();
3228         if (ret) {
3229                 MRVL_LOG(ERR, "Failed to init PP!");
3230                 rte_mvep_deinit(MVEP_MOD_T_PP2);
3231                 goto out_free_kvlist;
3232         }
3233
3234         memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
3235         memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
3236
3237         mrvl_lcore_first = RTE_MAX_LCORE;
3238         mrvl_lcore_last = 0;
3239
3240 init_devices:
3241         for (i = 0; i < ifnum; i++) {
3242                 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
3243                 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
3244                 if (ret)
3245                         goto out_cleanup;
3246                 mrvl_dev_num++;
3247         }
3248
3249         rte_kvargs_free(kvlist);
3250
3251         return 0;
3252 out_cleanup:
3253         rte_pmd_mrvl_remove(vdev);
3254
3255 out_free_kvlist:
3256         rte_kvargs_free(kvlist);
3257
3258         return ret;
3259 }
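
/*
 * Illustrative usage (interface names and the cfg path are assumptions):
 * the PMD is instantiated from the EAL command line, e.g.
 *
 *   dpdk-testpmd --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/tmp/qos.cfg
 *
 * with at most one "cfg" file accepted, as enforced above.
 */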
3260
3261 /**
3262  * DPDK callback to remove virtual device.
3263  *
3264  * @param vdev
3265  *   Pointer to the removed virtual device.
3266  *
3267  * @return
3268  *   0 on success, negative error value otherwise.
3269  */
3270 static int
3271 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
3272 {
3273         uint16_t port_id;
3274         int ret = 0;
3275
3276         RTE_ETH_FOREACH_DEV(port_id) {
3277                 if (rte_eth_devices[port_id].device != &vdev->device)
3278                         continue;
3279                 ret |= rte_eth_dev_close(port_id);
3280         }
3281
3282         return ret == 0 ? 0 : -EIO;
3283 }
3284
3285 static struct rte_vdev_driver pmd_mrvl_drv = {
3286         .probe = rte_pmd_mrvl_probe,
3287         .remove = rte_pmd_mrvl_remove,
3288 };
3289
3290 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
3291 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
3292 RTE_LOG_REGISTER(mrvl_logtype, pmd.net.mvpp2, NOTICE);