/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Semihalf. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
/* For struct rte_vdev_device and RTE_PMD_REGISTER_VDEV; older DPDK
 * releases provide these via <rte_vdev.h> instead.
 */
#include <rte_bus_vdev.h>
/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * so keep only one version here.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif
#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
62 #include "mrvl_ethdev.h"
/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
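/*
 * The reserved bitmasks above mark hifs 0-3 and bpools 0-2 as already in
 * use outside of this PMD (e.g. by the kernel driver), so that
 * mrvl_reserve_bit() below only ever hands out indices above them.
 */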
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2
/* TCAM has 25 entries reserved for uc/mc filter entries */
#define MRVL_MAC_ADDRS_MAX 25
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
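/*
 * A hardware cookie (pp2_cookie_t) is narrower than a virtual address, so
 * only the low bits of an mbuf pointer fit into a descriptor. The common
 * high bits are captured once in cookie_addr_high (see mrvl_fill_bpool())
 * and OR-ed back in when a cookie is turned into an mbuf pointer again.
 * A sketch of the round trip, assuming all mbufs share the same high bits
 * (mrvl_fill_bpool() rejects buffers for which this does not hold):
 *
 *	cookie = (pp2_cookie_t)(uint64_t)mbuf;	// low bits only
 *	mbuf = (struct rte_mbuf *)(cookie_addr_high | cookie);
 */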
static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};
static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	MRVL_MUSDK_BPOOLS_RESERVED,
	MRVL_MUSDK_BPOOLS_RESERVED
};
struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
/*
 * To make use of buffer harvesting based on the loopback port, a shadow
 * queue structure was introduced for bookkeeping buffer information.
 *
 * Before a packet is sent, the related buffer information (pp2_buff_inf)
 * is stored in a shadow queue. After the packet is transmitted, the
 * no-longer-used packet buffer is released back to its original hardware
 * pool, provided it originated from an interface. In case it was generated
 * by the application itself, i.e. the mbuf->port field is 0xff, it is
 * released back to the software mempool instead.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of buffers sent, that can be released */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
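/*
 * head/tail are advanced with "& MRVL_PP2_TX_SHADOWQ_MASK" throughout this
 * file, which assumes MRVL_PP2_TX_SHADOWQ_SIZE is a power of two and the
 * mask equals size - 1. One entry is effectively kept free (see
 * sq_free_size in mrvl_tx_pkt_burst()), the usual way of telling a full
 * ring apart from an empty one (head == tail).
 */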
struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
};
struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
};
/*
 * Every tx queue should have a dedicated shadow tx queue.
 *
 * Ports assigned by DPDK might not start at zero or be contiguous, so
 * as a workaround define a shadow queue for each possible port - this
 * way we always fit somewhere.
 */
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
/** Number of ports configured. */
int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;
static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}
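/*
 * mrvl_reserve_bit() below grabs the lowest bit above the highest bit
 * already set: n = 32 - clz(bitmap) is the index one past the top set bit.
 * A worked example with the initial hif bitmap, assuming a 32-bit int:
 *
 *	*bitmap == 0x0F -> __builtin_clz() == 28 -> n == 4,
 *
 * so hif 4 is reserved and the bitmap becomes 0x1F. The reserved masks are
 * never zero, which keeps __builtin_clz(*bitmap) well-defined.
 */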
static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}
/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
		RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
		RTE_LOG(INFO, PMD,
			"L2 CRC stripping is always enabled in hw\n");
		dev->data->dev_conf.rxmode.hw_strip_crc = 1;
	}

	if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		RTE_LOG(INFO, PMD, "Split headers not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_lro) {
		RTE_LOG(INFO, PMD, "LRO not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 ETHER_HDR_LEN - ETHER_CRC_LEN;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	return 0;
}
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	/* extra MV_MH_SIZE bytes are required for Marvell tag */
	uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
	int ret;

	if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
		return -EINVAL;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret)
		return ret;

	return pp2_ppio_set_mtu(priv->ppio, mtu);
}
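/*
 * For reference: with the default 1500-byte MTU and a 2-byte Marvell tag
 * (assuming MV_MH_SIZE == 2), the MRU programmed above works out to
 * 1500 + 2 + 14 (ETHER_HDR_LEN) + 4 (ETHER_CRC_LEN) = 1520 bytes.
 */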
/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret) {
		pp2_ppio_disable(priv->ppio);
		return ret;
	}

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}
/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_disable(priv->ppio);
	if (ret)
		return ret;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return 0;
}
/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret;

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;

	/*
	 * Calculate the maximum bpool size for the refill feature as 1.5 of
	 * the configured size. In case the bpool size exceeds this value,
	 * superfluous buffers will be removed.
	 */
	priv->bpool_max_size = priv->bpool_init_size +
			      (priv->bpool_init_size >> 1);
	/*
	 * Calculate the minimum bpool size for the refill feature as follows:
	 * 2 default burst sizes multiplied by the number of rx queues.
	 * If the bpool size drops below this value, new buffers will
	 * be added to the pool.
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;

	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret)
		return ret;

	/*
	 * In case there are some stale uc/mc mac addresses flush them here.
	 * It cannot be done during mrvl_dev_close() as port information is
	 * already gone at that point (due to pp2_ppio_deinit() in
	 * mrvl_dev_stop()).
	 */
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
		if (ret) {
			RTE_LOG(ERR, PMD,
				"Failed to flush uc/mc filter list\n");
			goto out;
		}
		priv->uc_mc_flushed = 1;
	}

	/* For default QoS config, don't start classifier. */
	if (mrvl_qos_cfg) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			pp2_ppio_deinit(priv->ppio);
			return ret;
		}
	}

	ret = mrvl_dev_set_link_up(dev);
	if (ret)
		goto out;

	return 0;
out:
	pp2_ppio_deinit(priv->ppio);
	return ret;
}
/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing rx queues\n");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		int ret, num;

		do {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
	}
}
/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct mrvl_shadow_txq *sq =
			&shadow_txqs[dev->data->port_id][i];

		while (sq->tail != sq->head) {
			uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
			rte_pktmbuf_free((struct rte_mbuf *)addr);
			sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		}

		memset(sq, 0, sizeof(*sq));
	}
}
/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint32_t num;
	int ret;

	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
		return;
	}

	while (num--) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
					 &inf);
		if (ret)
			break;

		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}
}
/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	mrvl_dev_set_link_down(dev);
	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	if (priv->qos_tbl)
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
	pp2_ppio_deinit(priv->ppio);
	priv->ppio = NULL;
}
/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	mrvl_flush_bpool(dev);
}
/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO
	 * once MUSDK provides necessary API use it here
	 */
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
							   ETH_LINK_FIXED;

	return 0;
}
/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}
/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
}
/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}
/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
}
/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
static void
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = pp2_ppio_remove_mac_addr(priv->ppio,
				       dev->data->mac_addrs[index].addr_bytes);
	if (ret) {
		ether_format_addr(buf, sizeof(buf),
				  &dev->data->mac_addrs[index]);
		RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
	}
}
/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 * @param vmdq
 *   VMDq pool index to associate address with (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	int ret;

	/* For setting index 0, mrvl_mac_addr_set() should be used. */
	if (index == 0)
		return -1;

	/*
	 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
	 * parameter uc_filter_max. Maximum number of mc addresses is then
	 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently these default to 4
	 * and 21 respectively.
	 *
	 * If more than uc_filter_max uc addresses are added to the filter
	 * list, the NIC switches to promiscuous mode automatically.
	 *
	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are
	 * added to the filter list, the NIC switches to all-multicast mode
	 * automatically.
	 */
	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		ether_format_addr(buf, sizeof(buf), mac_addr);
		RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
		return -1;
	}

	return 0;
}
/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	/*
	 * TODO
	 * Port stops sending packets if pp2_ppio_set_mac_addr()
	 * was called after pp2_ppio_enable(). As a quick fix issue
	 * enable port once again.
	 */
	pp2_ppio_enable(priv->ppio);
}
/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
static void
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		   struct rte_eth_dev_info *info)
{
	info->speed_capa = ETH_LINK_SPEED_10M |
			   ETH_LINK_SPEED_100M |
			   ETH_LINK_SPEED_1G |
			   ETH_LINK_SPEED_10G;

	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

	info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;

	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}
/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	qinfo->mp = q->mp;
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
}
/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}
/**
 * Release buffers to hardware bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
	struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
	struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
	int i, ret;
	unsigned int core_id = rte_lcore_id();
	struct pp2_hif *hif = hifs[core_id];
	struct pp2_bpool *bpool = rxq->priv->bpool;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (ret)
		return ret;

	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
		cookie_addr_high =
			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

	for (i = 0; i < num; i++) {
		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
		    != cookie_addr_high) {
			RTE_LOG(ERR, PMD,
				"mbuf virtual addr high 0x%lx out of range\n",
				(uint64_t)mbufs[i] >> 32);
			goto out;
		}

		entries[i].buff.addr =
			rte_mbuf_data_dma_addr_default(mbufs[i]);
		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
		entries[i].bpool = bpool;
	}

	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

	if (i != num)
		goto out;

	return 0;
out:
	for (; i < num; i++)
		rte_pktmbuf_free(mbufs[i]);

	return -1;
}
/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Receive queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_rxconf *conf __rte_unused,
		    struct rte_mempool *mp)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq;
	uint32_t min_size,
		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	int ret, tc, inq;

	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
		/* Unknown TC mapping, mapping will not have a correct queue. */
		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
			idx, priv->ppio_id);
		return -EFAULT;
	}

	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
		   MRVL_PKT_EFFEC_OFFS;
	if (min_size < max_rx_pkt_len) {
		RTE_LOG(ERR, PMD,
			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
			MRVL_PKT_EFFEC_OFFS,
			max_rx_pkt_len);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

	tc = priv->rxq_map[rxq->queue_id].tc;
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
		desc;

	ret = mrvl_fill_bpool(rxq, desc);
	if (ret) {
		rte_free(rxq);
		return ret;
	}

	priv->bpool_init_size += desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}
/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;

	if (!q)
		return;

	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}

	rte_free(q);
}
/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_txconf *conf __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq;

	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	if (!txq)
		return -ENOMEM;

	txq->priv = priv;
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	dev->data->tx_queues[idx] = txq;

	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

	return 0;
}
/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
static void
mrvl_tx_queue_release(void *txq)
{
	struct mrvl_txq *q = txq;

	if (!q)
		return;

	rte_free(q);
}
static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.dev_infos_get = mrvl_dev_infos_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
};
/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	int num;
	unsigned int core_id = rte_lcore_id();

	if (unlikely(!q->priv->ppio))
		return 0;

	bpool = q->priv->bpool;

	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0)) {
		RTE_LOG(ERR, PMD, "Failed to receive packets\n");
		return 0;
	}
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		enum pp2_inq_desc_status status;
		uint64_t addr;

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			u64 pref_addr;

			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		}

		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);

		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != PP2_DESC_ERR_OK)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_dma_addr_default(mbuf),
				.cookie = (pp2_cookie_t)(uint64_t)mbuf,
			};

			pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
			continue;
		}

		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;

		rx_pkts[rx_done++] = mbuf;
	}

	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
			if (ret)
				RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			int i;
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;

			RTE_LOG(DEBUG, PMD,
				"\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
				bpool->pp2_id, q->priv->ppio->port_id,
				bpool->id, pkt_to_remove, num,
				q->priv->bpool_init_size);

			for (i = 0; i < pkt_to_remove; i++) {
				pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			}
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -=
								pkt_to_remove;
		}
		rte_spinlock_unlock(&q->priv->lock);
	}

	return rx_done;
}
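/*
 * Note on buffer ownership in the tx path below: when a packet is queued
 * for transmission, its shadow-queue entry records the hardware bpool the
 * buffer should return to. Entries with bpool == NULL (application-owned
 * mbufs with port == 0xff, or mbufs with refcnt > 1) cannot be recycled
 * into a hardware pool and are instead freed back to their rte_mempool in
 * mrvl_free_sent_buffers().
 */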
/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Transmit queue id.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       struct mrvl_shadow_txq *sq, int qid, int force)
{
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	int i, core_id = rte_lcore_id();

	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

	sq->num_to_release += nb_done;

	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
		return;

	nb_done = sq->num_to_release;
	sq->num_to_release = 0;

	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
			RTE_LOG(ERR, PMD,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
			skip_bufs = 1;
			goto skip;
		}

		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;

			mbuf = (struct rte_mbuf *)
			       (cookie_addr_high | entry->buff.cookie);
			rte_pktmbuf_free(mbuf);
			skip_bufs = 1;
			goto skip;
		}

		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
		num++;
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
			goto skip;
		continue;
skip:
		if (likely(num))
			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		num += skip_bufs;
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
		num = 0;
		skip_bufs = 0;
	}

	if (likely(num)) {
		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
	}
}
/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
	struct pp2_hif *hif = hifs[rte_lcore_id()];
	struct pp2_ppio_desc descs[nb_pkts];
	int i;
	uint16_t num, sq_free_size;

	if (unlikely(!q->priv->ppio))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);

	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
		RTE_LOG(DEBUG, PMD,
			"No room in shadow queue for %d packets! %d packets will be sent.\n",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
		sq->ent[sq->head].buff.addr =
			rte_mbuf_data_dma_addr_default(mbuf);
		sq->ent[sq->head].bpool =
			(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
			 NULL : mrvl_port_to_bpool_lookup[mbuf->port];
		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size++;

		pp2_ppio_outq_desc_reset(&descs[i]);
		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
						 rte_pktmbuf_mtophys(mbuf));
		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
					       rte_pktmbuf_pkt_len(mbuf));
	}

	num = nb_pkts;
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/* number of packets that were not sent */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
		}
		sq->size -= num - nb_pkts;
	}

	return nb_pkts;
}
/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;

	return pp2_init(&init_params);
}
/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	pp2_deinit();
}
/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure, or NULL on
 *   failure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	int ret, bpool_bit;

	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	if (!priv)
		return NULL;

	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	if (ret)
		goto out_free_priv;

	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	if (bpool_bit < 0)
		goto out_free_priv;
	priv->bpool_bit = bpool_bit;

	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
		 priv->bpool_bit);
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
	if (ret)
		goto out_clear_bpool_bit;

	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);

	return priv;
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
	rte_free(priv);
	return NULL;
}
/**
 * Create device representing Ethernet port.
 *
 * @param vdev
 *   Pointer to the virtual device.
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	struct ifreq req;

	eth_dev = rte_eth_dev_allocate(name);
	if (!eth_dev)
		return -ENOMEM;

	priv = mrvl_priv_create(name);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_dev;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
		ret = -ENOMEM;
		goto out_free_dev;
	}

	memset(&req, 0, sizeof(req));
	strcpy(req.ifr_name, name);
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	if (ret)
		goto out_free_mac;

	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, ETHER_ADDR_LEN);

	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	eth_dev->data->dev_private = priv;
	eth_dev->device = &vdev->device;
	eth_dev->dev_ops = &mrvl_ops;

	return 0;
out_free_mac:
	rte_free(eth_dev->data->mac_addrs);
out_free_dev:
	rte_eth_dev_release_port(eth_dev);
	rte_free(priv);

	return ret;
}
/**
 * Cleanup previously created device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
static void
mrvl_eth_dev_destroy(const char *name)
{
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;

	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return;

	priv = eth_dev->data->dev_private;
	pp2_bpool_deinit(priv->bpool);
	rte_free(priv);
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
}
/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
		 void *extra_args)
{
	const char **ifnames = extra_args;

	ifnames[mrvl_ports_nb++] = value;

	return 0;
}
/**
 * Initialize per-lcore MUSDK hardware interfaces (hifs).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_hifs(void)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int i, ret;

	RTE_LCORE_FOREACH(i) {
		ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
		if (ret < 0)
			return ret;

		snprintf(match, sizeof(match), "hif-%d", ret);
		memset(&params, 0, sizeof(params));
		params.match = match;
		params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
		ret = pp2_hif_init(&params, &hifs[i]);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
			return ret;
		}
	}

	return 0;
}
/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	RTE_LCORE_FOREACH(i) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
}
static void mrvl_set_first_last_cores(int core_id)
{
	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
}
/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum, core_id;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames))
		goto out_free_kvlist;

	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);

	cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
	if (cfgnum > 1) {
		RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
		goto out_free_kvlist;
	} else if (cfgnum == 1) {
		rte_kvargs_process(kvlist, MRVL_CFG_ARG,
				   mrvl_get_qoscfg, &mrvl_qos_cfg);
	}

	/*
	 * ret == -EEXIST is correct, it means DMA
	 * has been already initialized (by another PMD).
	 */
	ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
	if (ret < 0 && ret != -EEXIST)
		goto out_free_kvlist;

	ret = mrvl_init_pp2();
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
		goto out_deinit_dma;
	}

	ret = mrvl_init_hifs();
	if (ret)
		goto out_deinit_hifs;

	for (i = 0; i < ifnum; i++) {
		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames[i]);
		if (ret)
			goto out_cleanup;
	}

	rte_kvargs_free(kvlist);

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));

	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

	RTE_LCORE_FOREACH(core_id) {
		mrvl_set_first_last_cores(core_id);
	}

	return 0;
out_cleanup:
	while (i--)
		mrvl_eth_dev_destroy(ifnames[i]);
out_deinit_hifs:
	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
out_deinit_dma:
	mv_sys_dma_mem_destroy();
out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}
/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
	int i;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (!name)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing %s\n", name);

	for (i = 0; i < rte_eth_dev_count(); i++) {
		char ifname[RTE_ETH_NAME_MAX_LEN];

		rte_eth_dev_get_name_by_port(i, ifname);
		mrvl_eth_dev_destroy(ifname);
	}

	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
	mv_sys_dma_mem_destroy();

	return 0;
}
static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
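/*
 * Usage sketch: the PMD is instantiated via vdev arguments, e.g. (the
 * interface names and the config path below are placeholders):
 *
 *	testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/path/to/qos.conf \
 *		-- -i
 *
 * Each "iface" must name a kernel netdev backed by a pp2 port; "cfg" is
 * optional and points at a QoS configuration parsed by mrvl_get_qoscfg().
 */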