1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_ethdev_driver.h>
8 #include <rte_kvargs.h>
10 #include <rte_malloc.h>
11 #include <rte_bus_vdev.h>
14 #include <linux/ethtool.h>
15 #include <linux/sockios.h>
17 #include <net/if_arp.h>
18 #include <sys/ioctl.h>
19 #include <sys/socket.h>
21 #include <sys/types.h>
23 #include <rte_mvep_common.h>
24 #include "mrvl_ethdev.h"
26 #include "mrvl_flow.h"
30 /* bitmask with reserved hifs */
31 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
32 /* bitmask with reserved bpools */
33 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
34 /* bitmask with reserved kernel RSS tables */
35 #define MRVL_MUSDK_RSS_RESERVED 0x01
36 /* maximum number of available hifs */
37 #define MRVL_MUSDK_HIFS_MAX 9
40 #define MRVL_MUSDK_PREFETCH_SHIFT 2
42 /* TCAM has 25 entries reserved for uc/mc filter entries */
43 #define MRVL_MAC_ADDRS_MAX 25
44 #define MRVL_MATCH_LEN 16
45 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
46 /* Maximum allowable packet size */
47 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
49 #define MRVL_IFACE_NAME_ARG "iface"
50 #define MRVL_CFG_ARG "cfg"
52 #define MRVL_BURST_SIZE 64
54 #define MRVL_ARP_LENGTH 28
56 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
58 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
59 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
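/*
 * Illustrative sketch (not from the original sources): only the low
 * sizeof(pp2_cookie_t) * 8 bits of an mbuf virtual address fit into a bpool
 * cookie, so the constant upper bits are cached once in cookie_addr_high and
 * OR'ed back whenever a buffer is released, e.g.:
 *
 *	uint64_t addr = cookie_addr_high | buff_inf.cookie;
 *	rte_pktmbuf_free((struct rte_mbuf *)addr);
 *
 * (buff_inf here stands for a struct pp2_buff_inf returned by the bpool.)
 */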
61 /** Port Rx offload capabilities */
62 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
63 DEV_RX_OFFLOAD_JUMBO_FRAME | \
64 DEV_RX_OFFLOAD_CHECKSUM)
66 /** Port Tx offloads capabilities */
67 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
68 DEV_TX_OFFLOAD_UDP_CKSUM | \
69 DEV_TX_OFFLOAD_TCP_CKSUM)
71 static const char * const valid_args[] = {
77 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
78 static struct pp2_hif *hifs[RTE_MAX_LCORE];
79 static int used_bpools[PP2_NUM_PKT_PROC] = {
80 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
83 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
84 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
85 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
90 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
95 * To support buffer harvesting based on the loopback port, a shadow queue
96 * structure was introduced for bookkeeping of buffer information.
98 * Before a packet is sent, the related buffer information (pp2_buff_inf) is
99 * stored in the shadow queue. After the packet is transmitted, the no longer
100 * used packet buffer is released back to its original hardware pool,
101 * provided it originated from an interface.
102 * In case it was generated by the application itself (i.e. the mbuf->port
103 * field is 0xff), it is released to the software mempool.
105 struct mrvl_shadow_txq {
106 int head; /* write index - used when sending buffers */
107 int tail; /* read index - used when releasing buffers */
108 u16 size; /* queue occupied size */
109 u16 num_to_release; /* number of buffers sent, that can be released */
110 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
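/*
 * Illustration only: both shadow queue indices wrap around using the
 * power-of-two mask, as done throughout the tx path, e.g.:
 *
 *	sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 *	sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
 */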
114 struct mrvl_priv *priv;
115 struct rte_mempool *mp;
124 struct mrvl_priv *priv;
128 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
129 int tx_deferred_start;
132 static int mrvl_lcore_first;
133 static int mrvl_lcore_last;
134 static int mrvl_dev_num;
136 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
137 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
138 struct pp2_hif *hif, unsigned int core_id,
139 struct mrvl_shadow_txq *sq, int qid, int force);
141 #define MRVL_XSTATS_TBL_ENTRY(name) { \
142 #name, offsetof(struct pp2_ppio_statistics, name), \
143 sizeof(((struct pp2_ppio_statistics *)0)->name) \
146 /* Table with xstats data */
151 } mrvl_xstats_tbl[] = {
152 MRVL_XSTATS_TBL_ENTRY(rx_bytes),
153 MRVL_XSTATS_TBL_ENTRY(rx_packets),
154 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
155 MRVL_XSTATS_TBL_ENTRY(rx_errors),
156 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
157 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
158 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
159 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
160 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
161 MRVL_XSTATS_TBL_ENTRY(tx_bytes),
162 MRVL_XSTATS_TBL_ENTRY(tx_packets),
163 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
164 MRVL_XSTATS_TBL_ENTRY(tx_errors)
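/*
 * For illustration only: MRVL_XSTATS_TBL_ENTRY(rx_bytes) expands to
 * { "rx_bytes", offsetof(struct pp2_ppio_statistics, rx_bytes),
 *   sizeof(((struct pp2_ppio_statistics *)0)->rx_bytes) },
 * i.e. a counter name plus the offset and width used by mrvl_xstats_get()
 * to read the value generically.
 */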
168 mrvl_get_bpool_size(int pp2_id, int pool_id)
173 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
174 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
180 mrvl_reserve_bit(int *bitmap, int max)
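/*
 * Note: sizeof(*bitmap) * 8 - __builtin_clz(*bitmap) below yields the bit
 * index just above the most significant bit already set; since bits are
 * reserved contiguously starting from bit 0 (and the bitmap is never zero
 * here), that index is the first free slot.
 */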
182 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
193 mrvl_init_hif(int core_id)
195 struct pp2_hif_params params;
196 char match[MRVL_MATCH_LEN];
199 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
201 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
205 snprintf(match, sizeof(match), "hif-%d", ret);
206 memset(&params, 0, sizeof(params));
207 params.match = match;
208 params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
209 ret = pp2_hif_init(&params, &hifs[core_id]);
211 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
218 static inline struct pp2_hif*
219 mrvl_get_hif(struct mrvl_priv *priv, int core_id)
223 if (likely(hifs[core_id] != NULL))
224 return hifs[core_id];
226 rte_spinlock_lock(&priv->lock);
228 ret = mrvl_init_hif(core_id);
230 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
234 if (core_id < mrvl_lcore_first)
235 mrvl_lcore_first = core_id;
237 if (core_id > mrvl_lcore_last)
238 mrvl_lcore_last = core_id;
240 rte_spinlock_unlock(&priv->lock);
242 return hifs[core_id];
246 * Configure RSS based on the DPDK RSS configuration.
249 * Pointer to private structure.
251 * Pointer to RSS configuration.
254 * 0 on success, negative error value otherwise.
257 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
259 if (rss_conf->rss_key)
260 MRVL_LOG(WARNING, "Changing hash key is not supported");
262 if (rss_conf->rss_hf == 0) {
263 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
264 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
265 priv->ppio_params.inqs_params.hash_type =
266 PP2_PPIO_HASH_T_2_TUPLE;
267 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
268 priv->ppio_params.inqs_params.hash_type =
269 PP2_PPIO_HASH_T_5_TUPLE;
270 priv->rss_hf_tcp = 1;
271 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
272 priv->ppio_params.inqs_params.hash_type =
273 PP2_PPIO_HASH_T_5_TUPLE;
274 priv->rss_hf_tcp = 0;
283 * Ethernet device configuration.
285 * Prepare the driver for a given number of TX and RX queues and
289 * Pointer to Ethernet device structure.
292 * 0 on success, negative error value otherwise.
295 mrvl_dev_configure(struct rte_eth_dev *dev)
297 struct mrvl_priv *priv = dev->data->dev_private;
301 MRVL_LOG(INFO, "Device reconfiguration is not supported");
305 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
306 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
307 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
308 dev->data->dev_conf.rxmode.mq_mode);
312 if (dev->data->dev_conf.rxmode.split_hdr_size) {
313 MRVL_LOG(INFO, "Split headers not supported");
317 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
318 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
319 ETHER_HDR_LEN - ETHER_CRC_LEN;
321 ret = mrvl_configure_rxqs(priv, dev->data->port_id,
322 dev->data->nb_rx_queues);
326 ret = mrvl_configure_txqs(priv, dev->data->port_id,
327 dev->data->nb_tx_queues);
331 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
332 priv->ppio_params.maintain_stats = 1;
333 priv->nb_rx_queues = dev->data->nb_rx_queues;
335 ret = mrvl_tm_init(dev);
339 if (dev->data->nb_rx_queues == 1 &&
340 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
341 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
342 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
347 return mrvl_configure_rss(priv,
348 &dev->data->dev_conf.rx_adv_conf.rss_conf);
352 * DPDK callback to change the MTU.
354 * Setting the MTU affects hardware MRU (packets larger than the MRU
358 * Pointer to Ethernet device structure.
363 * 0 on success, negative error value otherwise.
366 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
368 struct mrvl_priv *priv = dev->data->dev_private;
369 /* extra MV_MH_SIZE bytes are required for Marvell tag */
370 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
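/*
 * Worked example (illustration only, assuming MV_MH_SIZE is 2 bytes):
 * for the standard 1500-byte MTU the resulting MRU is
 * 1500 + 2 + 14 + 4 = 1520 bytes.
 */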
373 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
379 ret = pp2_ppio_set_mru(priv->ppio, mru);
383 return pp2_ppio_set_mtu(priv->ppio, mtu);
387 * DPDK callback to bring the link up.
390 * Pointer to Ethernet device structure.
393 * 0 on success, negative error value otherwise.
396 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
398 struct mrvl_priv *priv = dev->data->dev_private;
404 ret = pp2_ppio_enable(priv->ppio);
409 * The MTU/MRU can be updated only after pp2_ppio_enable() has been called
410 * at least once, as pp2_ppio_enable() changes port->t_mode from the default
411 * 0 to PP2_TRAFFIC_INGRESS_EGRESS.
413 * Set the MTU to the default DPDK value here.
415 ret = mrvl_mtu_set(dev, dev->data->mtu);
417 pp2_ppio_disable(priv->ppio);
423 * DPDK callback to bring the link down.
426 * Pointer to Ethernet device structure.
429 * 0 on success, negative error value otherwise.
432 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
434 struct mrvl_priv *priv = dev->data->dev_private;
439 return pp2_ppio_disable(priv->ppio);
443 * DPDK callback to start tx queue.
446 * Pointer to Ethernet device structure.
448 * Transmit queue index.
451 * 0 on success, negative error value otherwise.
454 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
456 struct mrvl_priv *priv = dev->data->dev_private;
462 /* passing 1 enables the given tx queue */
463 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
465 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
469 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
475 * DPDK callback to stop tx queue.
478 * Pointer to Ethernet device structure.
480 * Transmit queue index.
483 * 0 on success, negative error value otherwise.
486 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
488 struct mrvl_priv *priv = dev->data->dev_private;
494 /* passing 0 disables the given tx queue */
495 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
497 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
501 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
507 * DPDK callback to start the device.
510 * Pointer to Ethernet device structure.
513 * 0 on success, negative errno value on failure.
516 mrvl_dev_start(struct rte_eth_dev *dev)
518 struct mrvl_priv *priv = dev->data->dev_private;
519 char match[MRVL_MATCH_LEN];
520 int ret = 0, i, def_init_size;
523 return mrvl_dev_set_link_up(dev);
525 snprintf(match, sizeof(match), "ppio-%d:%d",
526 priv->pp_id, priv->ppio_id);
527 priv->ppio_params.match = match;
530 * Calculate the minimum bpool size for the refill feature as follows:
531 * two default burst sizes multiplied by the number of rx queues.
532 * If the bpool size drops below this value, new buffers will
533 * be added to the pool.
535 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
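/*
 * For example (illustration only): with 4 rx queues and the default
 * MRVL_BURST_SIZE of 64, the refill threshold is 4 * 64 * 2 = 512 buffers.
 */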
537 /* In case the initial bpool size configured during queue setup is
538 * smaller than the minimum size, add more buffers
540 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
541 if (priv->bpool_init_size < def_init_size) {
542 int buffs_to_add = def_init_size - priv->bpool_init_size;
544 priv->bpool_init_size += buffs_to_add;
545 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
547 MRVL_LOG(ERR, "Failed to add buffers to bpool");
551 * Calculate the maximum bpool size for the refill feature as follows:
552 * the maximum number of descriptors in an rx queue multiplied by the
553 * number of rx queues, plus the minimum bpool size.
554 * In case the bpool size exceeds this value, superfluous buffers
557 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
558 priv->bpool_min_size;
560 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
562 MRVL_LOG(ERR, "Failed to init ppio");
567 * In case there are some stale uc/mc MAC addresses, flush them
568 * here. It cannot be done during mrvl_dev_close() as the port information
569 * is already gone at that point (due to pp2_ppio_deinit() in
572 if (!priv->uc_mc_flushed) {
573 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
576 "Failed to flush uc/mc filter list");
579 priv->uc_mc_flushed = 1;
582 if (!priv->vlan_flushed) {
583 ret = pp2_ppio_flush_vlan(priv->ppio);
585 MRVL_LOG(ERR, "Failed to flush vlan list");
588 * once pp2_ppio_flush_vlan() is supported, jump to out
592 priv->vlan_flushed = 1;
595 /* For default QoS config, don't start classifier. */
597 mrvl_qos_cfg->port[dev->data->port_id].use_global_defaults == 0) {
598 ret = mrvl_start_qos_mapping(priv);
600 MRVL_LOG(ERR, "Failed to setup QoS mapping");
605 ret = mrvl_dev_set_link_up(dev);
607 MRVL_LOG(ERR, "Failed to set link up");
611 /* start tx queues */
612 for (i = 0; i < dev->data->nb_tx_queues; i++) {
613 struct mrvl_txq *txq = dev->data->tx_queues[i];
615 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
617 if (!txq->tx_deferred_start)
621 * All txqs are started by default. Stop them
622 * so that tx_deferred_start works as expected.
624 ret = mrvl_tx_queue_stop(dev, i);
634 MRVL_LOG(ERR, "Failed to start device");
635 pp2_ppio_deinit(priv->ppio);
640 * Flush receive queues.
643 * Pointer to Ethernet device structure.
646 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
650 MRVL_LOG(INFO, "Flushing rx queues");
651 for (i = 0; i < dev->data->nb_rx_queues; i++) {
655 struct mrvl_rxq *q = dev->data->rx_queues[i];
656 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
658 num = MRVL_PP2_RXD_MAX;
659 ret = pp2_ppio_recv(q->priv->ppio,
660 q->priv->rxq_map[q->queue_id].tc,
661 q->priv->rxq_map[q->queue_id].inq,
662 descs, (uint16_t *)&num);
663 } while (ret == 0 && num);
668 * Flush transmit shadow queues.
671 * Pointer to Ethernet device structure.
674 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
677 struct mrvl_txq *txq;
679 MRVL_LOG(INFO, "Flushing tx shadow queues");
680 for (i = 0; i < dev->data->nb_tx_queues; i++) {
681 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
683 for (j = 0; j < RTE_MAX_LCORE; j++) {
684 struct mrvl_shadow_txq *sq;
689 sq = &txq->shadow_txqs[j];
690 mrvl_free_sent_buffers(txq->priv->ppio,
691 hifs[j], j, sq, txq->queue_id, 1);
692 while (sq->tail != sq->head) {
693 uint64_t addr = cookie_addr_high |
694 sq->ent[sq->tail].buff.cookie;
696 (struct rte_mbuf *)addr);
697 sq->tail = (sq->tail + 1) &
698 MRVL_PP2_TX_SHADOWQ_MASK;
700 memset(sq, 0, sizeof(*sq));
706 * Flush hardware bpool (buffer-pool).
709 * Pointer to Ethernet device structure.
712 mrvl_flush_bpool(struct rte_eth_dev *dev)
714 struct mrvl_priv *priv = dev->data->dev_private;
718 unsigned int core_id = rte_lcore_id();
720 if (core_id == LCORE_ID_ANY)
723 hif = mrvl_get_hif(priv, core_id);
725 ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
727 MRVL_LOG(ERR, "Failed to get bpool buffers number");
732 struct pp2_buff_inf inf;
735 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
739 addr = cookie_addr_high | inf.cookie;
740 rte_pktmbuf_free((struct rte_mbuf *)addr);
745 * DPDK callback to stop the device.
748 * Pointer to Ethernet device structure.
751 mrvl_dev_stop(struct rte_eth_dev *dev)
753 mrvl_dev_set_link_down(dev);
757 * DPDK callback to close the device.
760 * Pointer to Ethernet device structure.
763 mrvl_dev_close(struct rte_eth_dev *dev)
765 struct mrvl_priv *priv = dev->data->dev_private;
768 mrvl_flush_rx_queues(dev);
769 mrvl_flush_tx_shadow_queues(dev);
770 mrvl_flow_deinit(dev);
771 mrvl_mtr_deinit(dev);
773 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
774 struct pp2_ppio_tc_params *tc_params =
775 &priv->ppio_params.inqs_params.tcs_params[i];
777 if (tc_params->inqs_params) {
778 rte_free(tc_params->inqs_params);
779 tc_params->inqs_params = NULL;
784 pp2_cls_tbl_deinit(priv->cls_tbl);
785 priv->cls_tbl = NULL;
789 pp2_cls_qos_tbl_deinit(priv->qos_tbl);
790 priv->qos_tbl = NULL;
793 mrvl_flush_bpool(dev);
797 pp2_ppio_deinit(priv->ppio);
801 /* policer must be released after ppio deinitialization */
802 if (priv->default_policer) {
803 pp2_cls_plcr_deinit(priv->default_policer);
804 priv->default_policer = NULL;
809 * DPDK callback to retrieve physical link information.
812 * Pointer to Ethernet device structure.
813 * @param wait_to_complete
814 * Wait for request completion (ignored).
817 * 0 on success, negative error value otherwise.
820 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
824 * once MUSDK provides the necessary API, use it here
826 struct mrvl_priv *priv = dev->data->dev_private;
827 struct ethtool_cmd edata;
829 int ret, fd, link_up;
834 edata.cmd = ETHTOOL_GSET;
836 strcpy(req.ifr_name, dev->data->name);
837 req.ifr_data = (void *)&edata;
839 fd = socket(AF_INET, SOCK_DGRAM, 0);
843 ret = ioctl(fd, SIOCETHTOOL, &req);
851 switch (ethtool_cmd_speed(&edata)) {
853 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
856 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
859 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
862 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
865 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
868 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
869 ETH_LINK_HALF_DUPLEX;
870 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
872 pp2_ppio_get_link_state(priv->ppio, &link_up);
873 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
879 * DPDK callback to enable promiscuous mode.
882 * Pointer to Ethernet device structure.
885 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
887 struct mrvl_priv *priv = dev->data->dev_private;
896 ret = pp2_ppio_set_promisc(priv->ppio, 1);
898 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
902 * DPDK callback to enable allmulti mode.
905 * Pointer to Ethernet device structure.
908 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
910 struct mrvl_priv *priv = dev->data->dev_private;
919 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
921 MRVL_LOG(ERR, "Failed enable all-multicast mode");
925 * DPDK callback to disable promiscuous mode.
928 * Pointer to Ethernet device structure.
931 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
933 struct mrvl_priv *priv = dev->data->dev_private;
939 ret = pp2_ppio_set_promisc(priv->ppio, 0);
941 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
945 * DPDK callback to disable allmulticast mode.
948 * Pointer to Ethernet device structure.
951 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
953 struct mrvl_priv *priv = dev->data->dev_private;
959 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
961 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
965 * DPDK callback to remove a MAC address.
968 * Pointer to Ethernet device structure.
973 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
975 struct mrvl_priv *priv = dev->data->dev_private;
976 char buf[ETHER_ADDR_FMT_SIZE];
985 ret = pp2_ppio_remove_mac_addr(priv->ppio,
986 dev->data->mac_addrs[index].addr_bytes);
988 ether_format_addr(buf, sizeof(buf),
989 &dev->data->mac_addrs[index]);
990 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
995 * DPDK callback to add a MAC address.
998 * Pointer to Ethernet device structure.
1000 * MAC address to register.
1002 * MAC address index.
1004 * VMDq pool index to associate address with (unused).
1007 * 0 on success, negative error value otherwise.
1010 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1011 uint32_t index, uint32_t vmdq __rte_unused)
1013 struct mrvl_priv *priv = dev->data->dev_private;
1014 char buf[ETHER_ADDR_FMT_SIZE];
1021 /* For setting index 0, mrvl_mac_addr_set() should be used. */
1028 * The maximum number of uc addresses can be tuned via the mvpp2x kernel
1029 * module parameter uc_filter_max. The maximum number of mc addresses is
1030 * then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
1033 * If more than uc_filter_max uc addresses are added to the filter list
1034 * then the NIC will switch to promiscuous mode automatically.
1036 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are added
1037 * to the filter list then the NIC will switch to all-multicast mode
1040 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1042 ether_format_addr(buf, sizeof(buf), mac_addr);
1043 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1051 * DPDK callback to set the primary MAC address.
1054 * Pointer to Ethernet device structure.
1056 * MAC address to register.
1059 * 0 on success, negative error value otherwise.
1062 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1064 struct mrvl_priv *priv = dev->data->dev_private;
1073 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1075 char buf[ETHER_ADDR_FMT_SIZE];
1076 ether_format_addr(buf, sizeof(buf), mac_addr);
1077 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1084 * DPDK callback to get device statistics.
1087 * Pointer to Ethernet device structure.
1089 * Stats structure output buffer.
1092 * 0 on success, negative error value otherwise.
1095 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1097 struct mrvl_priv *priv = dev->data->dev_private;
1098 struct pp2_ppio_statistics ppio_stats;
1099 uint64_t drop_mac = 0;
1100 unsigned int i, idx, ret;
1105 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1106 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1107 struct pp2_ppio_inq_statistics rx_stats;
1112 idx = rxq->queue_id;
1113 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1115 "rx queue %d stats out of range (0 - %d)",
1116 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1120 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1121 priv->rxq_map[idx].tc,
1122 priv->rxq_map[idx].inq,
1124 if (unlikely(ret)) {
1126 "Failed to update rx queue %d stats", idx);
1130 stats->q_ibytes[idx] = rxq->bytes_recv;
1131 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1132 stats->q_errors[idx] = rx_stats.drop_early +
1133 rx_stats.drop_fullq +
1136 stats->ibytes += rxq->bytes_recv;
1137 drop_mac += rxq->drop_mac;
1140 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1141 struct mrvl_txq *txq = dev->data->tx_queues[i];
1142 struct pp2_ppio_outq_statistics tx_stats;
1147 idx = txq->queue_id;
1148 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1150 "tx queue %d stats out of range (0 - %d)",
1151 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1154 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1156 if (unlikely(ret)) {
1158 "Failed to update tx queue %d stats", idx);
1162 stats->q_opackets[idx] = tx_stats.deq_desc;
1163 stats->q_obytes[idx] = txq->bytes_sent;
1164 stats->obytes += txq->bytes_sent;
1167 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1168 if (unlikely(ret)) {
1169 MRVL_LOG(ERR, "Failed to update port statistics");
1173 stats->ipackets += ppio_stats.rx_packets - drop_mac;
1174 stats->opackets += ppio_stats.tx_packets;
1175 stats->imissed += ppio_stats.rx_fullq_dropped +
1176 ppio_stats.rx_bm_dropped +
1177 ppio_stats.rx_early_dropped +
1178 ppio_stats.rx_fifo_dropped +
1179 ppio_stats.rx_cls_dropped;
1180 stats->ierrors = drop_mac;
1186 * DPDK callback to clear device statistics.
1189 * Pointer to Ethernet device structure.
1192 mrvl_stats_reset(struct rte_eth_dev *dev)
1194 struct mrvl_priv *priv = dev->data->dev_private;
1200 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1201 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1203 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1204 priv->rxq_map[i].inq, NULL, 1);
1205 rxq->bytes_recv = 0;
1209 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1210 struct mrvl_txq *txq = dev->data->tx_queues[i];
1212 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1213 txq->bytes_sent = 0;
1216 pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1220 * DPDK callback to get extended statistics.
1223 * Pointer to Ethernet device structure.
1225 * Pointer to xstats table.
1227 * Number of entries in xstats table.
1229 * Negative value on error, number of xstats read otherwise.
1232 mrvl_xstats_get(struct rte_eth_dev *dev,
1233 struct rte_eth_xstat *stats, unsigned int n)
1235 struct mrvl_priv *priv = dev->data->dev_private;
1236 struct pp2_ppio_statistics ppio_stats;
1242 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1243 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1246 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1247 val = *(uint32_t *)((uint8_t *)&ppio_stats +
1248 mrvl_xstats_tbl[i].offset);
1249 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1250 val = *(uint64_t *)((uint8_t *)&ppio_stats +
1251 mrvl_xstats_tbl[i].offset);
1256 stats[i].value = val;
1263 * DPDK callback to reset extended statistics.
1266 * Pointer to Ethernet device structure.
1269 mrvl_xstats_reset(struct rte_eth_dev *dev)
1271 mrvl_stats_reset(dev);
1275 * DPDK callback to get extended statistics names.
1277 * @param dev (unused)
1278 * Pointer to Ethernet device structure.
1279 * @param xstats_names
1280 * Pointer to xstats names table.
1282 * Size of the xstats names table.
1284 * Number of names read.
1287 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1288 struct rte_eth_xstat_name *xstats_names,
1294 return RTE_DIM(mrvl_xstats_tbl);
1296 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
1297 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
1298 mrvl_xstats_tbl[i].name);
1304 * DPDK callback to get information about the device.
1307 * Pointer to Ethernet device structure (unused).
1309 * Info structure output buffer.
1312 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
1313 struct rte_eth_dev_info *info)
1315 info->speed_capa = ETH_LINK_SPEED_10M |
1316 ETH_LINK_SPEED_100M |
1320 info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1321 info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1322 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1324 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1325 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1326 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1328 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1329 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1330 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1332 info->rx_offload_capa = MRVL_RX_OFFLOADS;
1333 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1335 info->tx_offload_capa = MRVL_TX_OFFLOADS;
1336 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1338 info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1339 ETH_RSS_NONFRAG_IPV4_TCP |
1340 ETH_RSS_NONFRAG_IPV4_UDP;
1342 /* By default packets are dropped if no descriptors are available */
1343 info->default_rxconf.rx_drop_en = 1;
1345 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1349 * Return supported packet types.
1352 * Pointer to Ethernet device structure (unused).
1355 * Const pointer to the table with supported packet types.
1357 static const uint32_t *
1358 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1360 static const uint32_t ptypes[] = {
1362 RTE_PTYPE_L2_ETHER_VLAN,
1363 RTE_PTYPE_L2_ETHER_QINQ,
1365 RTE_PTYPE_L3_IPV4_EXT,
1366 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1368 RTE_PTYPE_L3_IPV6_EXT,
1369 RTE_PTYPE_L2_ETHER_ARP,
1378 * DPDK callback to get information about specific receive queue.
1381 * Pointer to Ethernet device structure.
1382 * @param rx_queue_id
1383 * Receive queue index.
1385 * Receive queue information structure.
1387 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1388 struct rte_eth_rxq_info *qinfo)
1390 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1391 struct mrvl_priv *priv = dev->data->dev_private;
1392 int inq = priv->rxq_map[rx_queue_id].inq;
1393 int tc = priv->rxq_map[rx_queue_id].tc;
1394 struct pp2_ppio_tc_params *tc_params =
1395 &priv->ppio_params.inqs_params.tcs_params[tc];
1398 qinfo->nb_desc = tc_params->inqs_params[inq].size;
1402 * DPDK callback to get information about specific transmit queue.
1405 * Pointer to Ethernet device structure.
1406 * @param tx_queue_id
1407 * Transmit queue index.
1409 * Transmit queue information structure.
1411 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1412 struct rte_eth_txq_info *qinfo)
1414 struct mrvl_priv *priv = dev->data->dev_private;
1415 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1418 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1419 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1423 * DPDK callback to Configure a VLAN filter.
1426 * Pointer to Ethernet device structure.
1428 * VLAN ID to filter.
1433 * 0 on success, negative error value otherwise.
1436 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1438 struct mrvl_priv *priv = dev->data->dev_private;
1446 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1447 pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1451 * Release buffers to hardware bpool (buffer-pool)
1454 * Receive queue pointer.
1456 * Number of buffers to release to bpool.
1459 * 0 on success, negative error value otherwise.
1462 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1464 struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
1465 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
1467 unsigned int core_id;
1468 struct pp2_hif *hif;
1469 struct pp2_bpool *bpool;
1471 core_id = rte_lcore_id();
1472 if (core_id == LCORE_ID_ANY)
1475 hif = mrvl_get_hif(rxq->priv, core_id);
1479 bpool = rxq->priv->bpool;
1481 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1485 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1487 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1489 for (i = 0; i < num; i++) {
1490 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1491 != cookie_addr_high) {
1493 "mbuf virtual addr high 0x%lx out of range",
1494 (uint64_t)mbufs[i] >> 32);
1498 entries[i].buff.addr =
1499 rte_mbuf_data_iova_default(mbufs[i]);
1500 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
1501 entries[i].bpool = bpool;
1504 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1505 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1512 for (; i < num; i++)
1513 rte_pktmbuf_free(mbufs[i]);
1519 * DPDK callback to configure the receive queue.
1522 * Pointer to Ethernet device structure.
1526 * Number of descriptors to configure in queue.
1528 * NUMA socket on which memory must be allocated.
1530 * Thresholds parameters.
1532 * Memory pool for buffer allocations.
1535 * 0 on success, negative error value otherwise.
1538 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1539 unsigned int socket,
1540 const struct rte_eth_rxconf *conf,
1541 struct rte_mempool *mp)
1543 struct mrvl_priv *priv = dev->data->dev_private;
1544 struct mrvl_rxq *rxq;
1546 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1550 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1552 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1554 * Unknown TC mapping; the queue cannot be mapped to a correct TC.
1556 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1557 idx, priv->ppio_id);
1561 min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
1562 MRVL_PKT_EFFEC_OFFS;
1563 if (min_size < max_rx_pkt_len) {
1565 "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
1566 max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
1567 MRVL_PKT_EFFEC_OFFS,
1572 if (dev->data->rx_queues[idx]) {
1573 rte_free(dev->data->rx_queues[idx]);
1574 dev->data->rx_queues[idx] = NULL;
1577 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
1583 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
1584 rxq->queue_id = idx;
1585 rxq->port_id = dev->data->port_id;
1586 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
1588 tc = priv->rxq_map[rxq->queue_id].tc,
1589 inq = priv->rxq_map[rxq->queue_id].inq;
1590 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
1593 ret = mrvl_fill_bpool(rxq, desc);
1599 priv->bpool_init_size += desc;
1601 dev->data->rx_queues[idx] = rxq;
1607 * DPDK callback to release the receive queue.
1610 * Generic receive queue pointer.
1613 mrvl_rx_queue_release(void *rxq)
1615 struct mrvl_rxq *q = rxq;
1616 struct pp2_ppio_tc_params *tc_params;
1617 int i, num, tc, inq;
1618 struct pp2_hif *hif;
1619 unsigned int core_id = rte_lcore_id();
1621 if (core_id == LCORE_ID_ANY)
1627 hif = mrvl_get_hif(q->priv, core_id);
1632 tc = q->priv->rxq_map[q->queue_id].tc;
1633 inq = q->priv->rxq_map[q->queue_id].inq;
1634 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
1635 num = tc_params->inqs_params[inq].size;
1636 for (i = 0; i < num; i++) {
1637 struct pp2_buff_inf inf;
1640 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
1641 addr = cookie_addr_high | inf.cookie;
1642 rte_pktmbuf_free((struct rte_mbuf *)addr);
1649 * DPDK callback to configure the transmit queue.
1652 * Pointer to Ethernet device structure.
1654 * Transmit queue index.
1656 * Number of descriptors to configure in the queue.
1658 * NUMA socket on which memory must be allocated.
1660 * Tx queue configuration parameters.
1663 * 0 on success, negative error value otherwise.
1666 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1667 unsigned int socket,
1668 const struct rte_eth_txconf *conf)
1670 struct mrvl_priv *priv = dev->data->dev_private;
1671 struct mrvl_txq *txq;
1673 if (dev->data->tx_queues[idx]) {
1674 rte_free(dev->data->tx_queues[idx]);
1675 dev->data->tx_queues[idx] = NULL;
1678 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
1683 txq->queue_id = idx;
1684 txq->port_id = dev->data->port_id;
1685 txq->tx_deferred_start = conf->tx_deferred_start;
1686 dev->data->tx_queues[idx] = txq;
1688 priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
1694 * DPDK callback to release the transmit queue.
1697 * Generic transmit queue pointer.
1700 mrvl_tx_queue_release(void *txq)
1702 struct mrvl_txq *q = txq;
1711 * DPDK callback to get flow control configuration.
1714 * Pointer to Ethernet device structure.
1716 * Pointer to the flow control configuration.
1719 * 0 on success, negative error value otherwise.
1722 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1724 struct mrvl_priv *priv = dev->data->dev_private;
1730 ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
1732 MRVL_LOG(ERR, "Failed to read rx pause state");
1736 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
1742 * DPDK callback to set flow control configuration.
1745 * Pointer to Ethernet device structure.
1747 * Pointer to the flow control configuration.
1750 * 0 on success, negative error value otherwise.
1753 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1755 struct mrvl_priv *priv = dev->data->dev_private;
1760 if (fc_conf->high_water ||
1761 fc_conf->low_water ||
1762 fc_conf->pause_time ||
1763 fc_conf->mac_ctrl_frame_fwd ||
1765 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
1770 if (fc_conf->mode == RTE_FC_NONE ||
1771 fc_conf->mode == RTE_FC_RX_PAUSE) {
1774 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
1775 ret = pp2_ppio_set_rx_pause(priv->ppio, en);
1778 "Failed to change flowctrl on RX side");
1787 * Update RSS hash configuration
1790 * Pointer to Ethernet device structure.
1792 * Pointer to RSS configuration.
1795 * 0 on success, negative error value otherwise.
1798 mrvl_rss_hash_update(struct rte_eth_dev *dev,
1799 struct rte_eth_rss_conf *rss_conf)
1801 struct mrvl_priv *priv = dev->data->dev_private;
1806 return mrvl_configure_rss(priv, rss_conf);
1810 * DPDK callback to get RSS hash configuration.
1813 * Pointer to Ethernet device structure.
1815 * Pointer to RSS configuration.
1821 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
1822 struct rte_eth_rss_conf *rss_conf)
1824 struct mrvl_priv *priv = dev->data->dev_private;
1825 enum pp2_ppio_hash_type hash_type =
1826 priv->ppio_params.inqs_params.hash_type;
1828 rss_conf->rss_key = NULL;
1830 if (hash_type == PP2_PPIO_HASH_T_NONE)
1831 rss_conf->rss_hf = 0;
1832 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
1833 rss_conf->rss_hf = ETH_RSS_IPV4;
1834 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
1835 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
1836 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
1837 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
1843 * DPDK callback to get rte_flow callbacks.
1846 * Pointer to the device structure.
1850 * Flow filter operation.
1852 * Pointer to pass the flow ops.
1855 * 0 on success, negative error value otherwise.
1858 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
1859 enum rte_filter_type filter_type,
1860 enum rte_filter_op filter_op, void *arg)
1862 switch (filter_type) {
1863 case RTE_ETH_FILTER_GENERIC:
1864 if (filter_op != RTE_ETH_FILTER_GET)
1866 *(const void **)arg = &mrvl_flow_ops;
1869 MRVL_LOG(WARNING, "Filter type (%d) not supported",
1876 * DPDK callback to get rte_mtr callbacks.
1879 * Pointer to the device structure.
1881 * Pointer to pass the mtr ops.
1887 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1889 *(const void **)ops = &mrvl_mtr_ops;
1895 * DPDK callback to get rte_tm callbacks.
1898 * Pointer to the device structure.
1900 * Pointer to pass the tm ops.
1906 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1908 *(const void **)ops = &mrvl_tm_ops;
1913 static const struct eth_dev_ops mrvl_ops = {
1914 .dev_configure = mrvl_dev_configure,
1915 .dev_start = mrvl_dev_start,
1916 .dev_stop = mrvl_dev_stop,
1917 .dev_set_link_up = mrvl_dev_set_link_up,
1918 .dev_set_link_down = mrvl_dev_set_link_down,
1919 .dev_close = mrvl_dev_close,
1920 .link_update = mrvl_link_update,
1921 .promiscuous_enable = mrvl_promiscuous_enable,
1922 .allmulticast_enable = mrvl_allmulticast_enable,
1923 .promiscuous_disable = mrvl_promiscuous_disable,
1924 .allmulticast_disable = mrvl_allmulticast_disable,
1925 .mac_addr_remove = mrvl_mac_addr_remove,
1926 .mac_addr_add = mrvl_mac_addr_add,
1927 .mac_addr_set = mrvl_mac_addr_set,
1928 .mtu_set = mrvl_mtu_set,
1929 .stats_get = mrvl_stats_get,
1930 .stats_reset = mrvl_stats_reset,
1931 .xstats_get = mrvl_xstats_get,
1932 .xstats_reset = mrvl_xstats_reset,
1933 .xstats_get_names = mrvl_xstats_get_names,
1934 .dev_infos_get = mrvl_dev_infos_get,
1935 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
1936 .rxq_info_get = mrvl_rxq_info_get,
1937 .txq_info_get = mrvl_txq_info_get,
1938 .vlan_filter_set = mrvl_vlan_filter_set,
1939 .tx_queue_start = mrvl_tx_queue_start,
1940 .tx_queue_stop = mrvl_tx_queue_stop,
1941 .rx_queue_setup = mrvl_rx_queue_setup,
1942 .rx_queue_release = mrvl_rx_queue_release,
1943 .tx_queue_setup = mrvl_tx_queue_setup,
1944 .tx_queue_release = mrvl_tx_queue_release,
1945 .flow_ctrl_get = mrvl_flow_ctrl_get,
1946 .flow_ctrl_set = mrvl_flow_ctrl_set,
1947 .rss_hash_update = mrvl_rss_hash_update,
1948 .rss_hash_conf_get = mrvl_rss_hash_conf_get,
1949 .filter_ctrl = mrvl_eth_filter_ctrl,
1950 .mtr_ops_get = mrvl_mtr_ops_get,
1951 .tm_ops_get = mrvl_tm_ops_get,
1955 * Return packet type information and l3/l4 offsets.
1958 * Pointer to the received packet descriptor.
1965 * Packet type information.
1967 static inline uint64_t
1968 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
1969 uint8_t *l3_offset, uint8_t *l4_offset)
1971 enum pp2_inq_l3_type l3_type;
1972 enum pp2_inq_l4_type l4_type;
1973 enum pp2_inq_vlan_tag vlan_tag;
1974 uint64_t packet_type;
1976 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
1977 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
1978 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
1980 packet_type = RTE_PTYPE_L2_ETHER;
1983 case PP2_INQ_VLAN_TAG_SINGLE:
1984 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1986 case PP2_INQ_VLAN_TAG_DOUBLE:
1987 case PP2_INQ_VLAN_TAG_TRIPLE:
1988 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
1995 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
1996 packet_type |= RTE_PTYPE_L3_IPV4;
1998 case PP2_INQ_L3_TYPE_IPV4_OK:
1999 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2001 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2002 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2004 case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2005 packet_type |= RTE_PTYPE_L3_IPV6;
2007 case PP2_INQ_L3_TYPE_IPV6_EXT:
2008 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2010 case PP2_INQ_L3_TYPE_ARP:
2011 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2013 * In case of ARP, l4_offset is set to a wrong value.
2014 * Set it to a proper one so that later on mbuf->l3_len can be
2015 * calculated by subtracting l3_offset from l4_offset.
2017 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2020 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
2025 case PP2_INQ_L4_TYPE_TCP:
2026 packet_type |= RTE_PTYPE_L4_TCP;
2028 case PP2_INQ_L4_TYPE_UDP:
2029 packet_type |= RTE_PTYPE_L4_UDP;
2032 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
2040 * Get offload information from the received packet descriptor.
2043 * Pointer to the received packet descriptor.
2046 * Mbuf offload flags.
2048 static inline uint64_t
2049 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2052 enum pp2_inq_desc_status status;
2054 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2055 if (unlikely(status != PP2_DESC_ERR_OK))
2056 flags = PKT_RX_IP_CKSUM_BAD;
2058 flags = PKT_RX_IP_CKSUM_GOOD;
2060 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2061 if (unlikely(status != PP2_DESC_ERR_OK))
2062 flags |= PKT_RX_L4_CKSUM_BAD;
2064 flags |= PKT_RX_L4_CKSUM_GOOD;
2070 * DPDK callback for receive.
2073 * Generic pointer to the receive queue.
2075 * Array to store received packets.
2077 * Maximum number of packets in array.
2080 * Number of packets successfully received.
2083 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2085 struct mrvl_rxq *q = rxq;
2086 struct pp2_ppio_desc descs[nb_pkts];
2087 struct pp2_bpool *bpool;
2088 int i, ret, rx_done = 0;
2090 struct pp2_hif *hif;
2091 unsigned int core_id = rte_lcore_id();
2093 hif = mrvl_get_hif(q->priv, core_id);
2095 if (unlikely(!q->priv->ppio || !hif))
2098 bpool = q->priv->bpool;
2100 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2101 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2102 if (unlikely(ret < 0)) {
2103 MRVL_LOG(ERR, "Failed to receive packets");
2106 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2108 for (i = 0; i < nb_pkts; i++) {
2109 struct rte_mbuf *mbuf;
2110 uint8_t l3_offset, l4_offset;
2111 enum pp2_inq_desc_status status;
2114 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2115 struct pp2_ppio_desc *pref_desc;
2118 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2119 pref_addr = cookie_addr_high |
2120 pp2_ppio_inq_desc_get_cookie(pref_desc);
2121 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2122 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2125 addr = cookie_addr_high |
2126 pp2_ppio_inq_desc_get_cookie(&descs[i]);
2127 mbuf = (struct rte_mbuf *)addr;
2128 rte_pktmbuf_reset(mbuf);
2130 /* drop packet in case of mac, overrun or resource error */
2131 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2132 if (unlikely(status != PP2_DESC_ERR_OK)) {
2133 struct pp2_buff_inf binf = {
2134 .addr = rte_mbuf_data_iova_default(mbuf),
2135 .cookie = (pp2_cookie_t)(uint64_t)mbuf,
2138 pp2_bpool_put_buff(hif, bpool, &binf);
2139 mrvl_port_bpool_size
2140 [bpool->pp2_id][bpool->id][core_id]++;
2145 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2146 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2147 mbuf->data_len = mbuf->pkt_len;
2148 mbuf->port = q->port_id;
2150 mrvl_desc_to_packet_type_and_offset(&descs[i],
2153 mbuf->l2_len = l3_offset;
2154 mbuf->l3_len = l4_offset - l3_offset;
2156 if (likely(q->cksum_enabled))
2157 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
2159 rx_pkts[rx_done++] = mbuf;
2160 q->bytes_recv += mbuf->pkt_len;
2163 if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2164 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
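/*
 * Refill the bpool with another burst of buffers when it runs low (or when
 * nothing was received while the pool is still below its initial size);
 * shrink it back towards bpool_init_size once it grows past bpool_max_size.
 */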
2166 if (unlikely(num <= q->priv->bpool_min_size ||
2167 (!rx_done && num < q->priv->bpool_init_size))) {
2168 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
2170 MRVL_LOG(ERR, "Failed to fill bpool");
2171 } else if (unlikely(num > q->priv->bpool_max_size)) {
2173 int pkt_to_remove = num - q->priv->bpool_init_size;
2174 struct rte_mbuf *mbuf;
2175 struct pp2_buff_inf buff;
2178 "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
2179 bpool->pp2_id, q->priv->ppio->port_id,
2180 bpool->id, pkt_to_remove, num,
2181 q->priv->bpool_init_size);
2183 for (i = 0; i < pkt_to_remove; i++) {
2184 ret = pp2_bpool_get_buff(hif, bpool, &buff);
2187 mbuf = (struct rte_mbuf *)
2188 (cookie_addr_high | buff.cookie);
2189 rte_pktmbuf_free(mbuf);
2191 mrvl_port_bpool_size
2192 [bpool->pp2_id][bpool->id][core_id] -= i;
2194 rte_spinlock_unlock(&q->priv->lock);
2201 * Prepare offload information.
2205 * @param packet_type
2206 * Packet type bitfield.
2208 * Pointer to the pp2_outq_l3_type enum.
2210 * Pointer to the pp2_outq_l4_type enum.
2211 * @param gen_l3_cksum
2212 * Will be set to 1 in case l3 checksum is computed.
2214 * Will be set to 1 in case l4 checksum is computed.
2217 * 0 on success, negative error value otherwise.
2220 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2221 enum pp2_outq_l3_type *l3_type,
2222 enum pp2_outq_l4_type *l4_type,
2227 * Based on ol_flags, prepare information
2228 * for pp2_ppio_outq_desc_set_proto_info() which sets up the descriptor
2231 if (ol_flags & PKT_TX_IPV4) {
2232 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2233 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2234 } else if (ol_flags & PKT_TX_IPV6) {
2235 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2236 /* no checksum for ipv6 header */
2239 /* if something different, stop processing */
2243 ol_flags &= PKT_TX_L4_MASK;
2244 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2245 ol_flags == PKT_TX_TCP_CKSUM) {
2246 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2248 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2249 ol_flags == PKT_TX_UDP_CKSUM) {
2250 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2253 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2254 /* no checksum for other type */
2262 * Release already sent buffers to bpool (buffer-pool).
2265 * Pointer to the port structure.
2267 * Pointer to the MUSDK hardware interface.
2269 * Pointer to the shadow queue.
2273 * Force releasing packets.
2276 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2277 unsigned int core_id, struct mrvl_shadow_txq *sq,
2280 struct buff_release_entry *entry;
2281 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2284 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2286 sq->num_to_release += nb_done;
2288 if (likely(!force &&
2289 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2292 nb_done = sq->num_to_release;
2293 sq->num_to_release = 0;
2295 for (i = 0; i < nb_done; i++) {
2296 entry = &sq->ent[sq->tail + num];
2297 if (unlikely(!entry->buff.addr)) {
2299 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2300 sq->tail, (u64)entry->buff.cookie,
2301 (u64)entry->buff.addr);
2306 if (unlikely(!entry->bpool)) {
2307 struct rte_mbuf *mbuf;
2309 mbuf = (struct rte_mbuf *)
2310 (cookie_addr_high | entry->buff.cookie);
2311 rte_pktmbuf_free(mbuf);
2316 mrvl_port_bpool_size
2317 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2319 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2324 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2326 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2333 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2334 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2340 * DPDK callback for transmit.
2343 * Generic pointer transmit queue.
2345 * Packets to transmit.
2347 * Number of packets in array.
2350 * Number of packets successfully transmitted.
2353 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2355 struct mrvl_txq *q = txq;
2356 struct mrvl_shadow_txq *sq;
2357 struct pp2_hif *hif;
2358 struct pp2_ppio_desc descs[nb_pkts];
2359 unsigned int core_id = rte_lcore_id();
2360 int i, ret, bytes_sent = 0;
2361 uint16_t num, sq_free_size;
2364 hif = mrvl_get_hif(q->priv, core_id);
2365 sq = &q->shadow_txqs[core_id];
2367 if (unlikely(!q->priv->ppio || !hif))
2371 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2372 sq, q->queue_id, 0);
2374 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2375 if (unlikely(nb_pkts > sq_free_size)) {
2377 "No room in shadow queue for %d packets! %d packets will be sent.",
2378 nb_pkts, sq_free_size);
2379 nb_pkts = sq_free_size;
2382 for (i = 0; i < nb_pkts; i++) {
2383 struct rte_mbuf *mbuf = tx_pkts[i];
2384 int gen_l3_cksum, gen_l4_cksum;
2385 enum pp2_outq_l3_type l3_type;
2386 enum pp2_outq_l4_type l4_type;
2388 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2389 struct rte_mbuf *pref_pkt_hdr;
2391 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2392 rte_mbuf_prefetch_part1(pref_pkt_hdr);
2393 rte_mbuf_prefetch_part2(pref_pkt_hdr);
2396 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2397 sq->ent[sq->head].buff.addr =
2398 rte_mbuf_data_iova_default(mbuf);
2399 sq->ent[sq->head].bpool =
2400 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
2401 mbuf->refcnt > 1)) ? NULL :
2402 mrvl_port_to_bpool_lookup[mbuf->port];
2403 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2406 pp2_ppio_outq_desc_reset(&descs[i]);
2407 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2408 rte_pktmbuf_iova(mbuf));
2409 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2410 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2411 rte_pktmbuf_pkt_len(mbuf));
2413 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2415 * in case unsupported ol_flags were passed,
2416 * do not update the descriptor offload information
2418 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2419 &l3_type, &l4_type, &gen_l3_cksum,
2424 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2426 mbuf->l2_len + mbuf->l3_len,
2427 gen_l3_cksum, gen_l4_cksum);
2431 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2432 /* number of packets that were not sent */
2433 if (unlikely(num > nb_pkts)) {
2434 for (i = nb_pkts; i < num; i++) {
2435 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2436 MRVL_PP2_TX_SHADOWQ_MASK;
2437 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2439 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2441 sq->size -= num - nb_pkts;
2444 q->bytes_sent += bytes_sent;
2450 * Initialize packet processor.
2453 * 0 on success, negative error value otherwise.
2458 struct pp2_init_params init_params;
2460 memset(&init_params, 0, sizeof(init_params));
2461 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2462 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2463 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2465 return pp2_init(&init_params);
2469 * Deinitialize packet processor.
2472 * 0 on success, negative error value otherwise.
2475 mrvl_deinit_pp2(void)
2481 * Create private device structure.
2484 * Pointer to the port name passed in the initialization parameters.
2487 * Pointer to the newly allocated private device structure.
2489 static struct mrvl_priv *
2490 mrvl_priv_create(const char *dev_name)
2492 struct pp2_bpool_params bpool_params;
2493 char match[MRVL_MATCH_LEN];
2494 struct mrvl_priv *priv;
2497 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2501 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2502 &priv->pp_id, &priv->ppio_id);
2506 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2507 PP2_BPOOL_NUM_POOLS);
2510 priv->bpool_bit = bpool_bit;
2512 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2514 memset(&bpool_params, 0, sizeof(bpool_params));
2515 bpool_params.match = match;
2516 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2517 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2519 goto out_clear_bpool_bit;
2521 priv->ppio_params.type = PP2_PPIO_T_NIC;
2522 rte_spinlock_init(&priv->lock);
2525 out_clear_bpool_bit:
2526 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2533 * Create device representing Ethernet port.
2536 * Pointer to the port's name.
2539 * 0 on success, negative error value otherwise.
2542 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2544 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2545 struct rte_eth_dev *eth_dev;
2546 struct mrvl_priv *priv;
2549 eth_dev = rte_eth_dev_allocate(name);
2553 priv = mrvl_priv_create(name);
2559 eth_dev->data->mac_addrs =
2560 rte_zmalloc("mac_addrs",
2561 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2562 if (!eth_dev->data->mac_addrs) {
2563 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
2568 memset(&req, 0, sizeof(req));
2569 strcpy(req.ifr_name, name);
2570 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2574 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2575 req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2577 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2578 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2579 eth_dev->data->kdrv = RTE_KDRV_NONE;
2580 eth_dev->data->dev_private = priv;
2581 eth_dev->device = &vdev->device;
2582 eth_dev->dev_ops = &mrvl_ops;
2584 rte_eth_dev_probing_finish(eth_dev);
2587 rte_free(eth_dev->data->mac_addrs);
2589 rte_eth_dev_release_port(eth_dev);
2597 * Clean up a previously created device representing an Ethernet port.
2600 * Pointer to the port name.
2603 mrvl_eth_dev_destroy(const char *name)
2605 struct rte_eth_dev *eth_dev;
2606 struct mrvl_priv *priv;
2608 eth_dev = rte_eth_dev_allocated(name);
2612 priv = eth_dev->data->dev_private;
2613 pp2_bpool_deinit(priv->bpool);
2614 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2616 rte_free(eth_dev->data->mac_addrs);
2617 rte_eth_dev_release_port(eth_dev);
2621 * Callback used by rte_kvargs_process() during argument parsing.
2624 * Pointer to the parsed key (unused).
2626 * Pointer to the parsed value.
2628 * Pointer to the extra arguments which contains address of the
2629 * table of pointers to parsed interface names.
2635 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2638 struct mrvl_ifnames *ifnames = extra_args;
2640 ifnames->names[ifnames->idx++] = value;
2646 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2649 mrvl_deinit_hifs(void)
2653 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2655 pp2_hif_deinit(hifs[i]);
2657 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2658 memset(hifs, 0, sizeof(hifs));
2662 * DPDK callback to register the virtual device.
2665 * Pointer to the virtual device.
2668 * 0 on success, negative error value otherwise.
2671 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2673 struct rte_kvargs *kvlist;
2674 struct mrvl_ifnames ifnames;
2676 uint32_t i, ifnum, cfgnum;
2679 params = rte_vdev_device_args(vdev);
2683 kvlist = rte_kvargs_parse(params, valid_args);
2687 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2688 if (ifnum > RTE_DIM(ifnames.names))
2689 goto out_free_kvlist;
2692 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2693 mrvl_get_ifnames, &ifnames);
2697 * The system initialization below should be done only once,
2698 * on the first provided configuration file
2700 if (!mrvl_qos_cfg) {
2701 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2702 MRVL_LOG(INFO, "Parsing config file!");
2704 MRVL_LOG(ERR, "Cannot handle more than one config file!");
2705 goto out_free_kvlist;
2706 } else if (cfgnum == 1) {
2707 rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2708 mrvl_get_qoscfg, &mrvl_qos_cfg);
2715 MRVL_LOG(INFO, "Perform MUSDK initializations");
2717 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
2719 goto out_free_kvlist;
2721 ret = mrvl_init_pp2();
2723 MRVL_LOG(ERR, "Failed to init PP!");
2724 rte_mvep_deinit(MVEP_MOD_T_PP2);
2725 goto out_free_kvlist;
2728 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2729 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
2731 mrvl_lcore_first = RTE_MAX_LCORE;
2732 mrvl_lcore_last = 0;
2735 for (i = 0; i < ifnum; i++) {
2736 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
2737 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2741 mrvl_dev_num += ifnum;
2743 rte_kvargs_free(kvlist);
2748 mrvl_eth_dev_destroy(ifnames.names[i]);
2750 if (mrvl_dev_num == 0) {
2752 rte_mvep_deinit(MVEP_MOD_T_PP2);
2755 rte_kvargs_free(kvlist);
2761 * DPDK callback to remove virtual device.
2764 * Pointer to the removed virtual device.
2767 * 0 on success, negative error value otherwise.
2770 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2775 name = rte_vdev_device_name(vdev);
2779 MRVL_LOG(INFO, "Removing %s", name);
2781 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
2782 char ifname[RTE_ETH_NAME_MAX_LEN];
2784 rte_eth_dev_get_name_by_port(i, ifname);
2785 mrvl_eth_dev_destroy(ifname);
2789 if (mrvl_dev_num == 0) {
2790 MRVL_LOG(INFO, "Perform MUSDK deinit");
2793 rte_mvep_deinit(MVEP_MOD_T_PP2);
2799 static struct rte_vdev_driver pmd_mrvl_drv = {
2800 .probe = rte_pmd_mrvl_probe,
2801 .remove = rte_pmd_mrvl_remove,
2804 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
2805 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
2807 RTE_INIT(mrvl_init_log)
2809 mrvl_logtype = rte_log_register("pmd.net.mvpp2");
2810 if (mrvl_logtype >= 0)
2811 rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);