1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_ethdev_driver.h>
8 #include <rte_kvargs.h>
10 #include <rte_malloc.h>
11 #include <rte_bus_vdev.h>
14 #include <linux/ethtool.h>
15 #include <linux/sockios.h>
17 #include <net/if_arp.h>
18 #include <sys/ioctl.h>
19 #include <sys/socket.h>
21 #include <sys/types.h>
23 #include <rte_mvep_common.h>
24 #include "mrvl_ethdev.h"
26 #include "mrvl_flow.h"
30 /* bitmask with reserved hifs */
31 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
32 /* bitmask with reserved bpools */
33 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
34 /* bitmask with reserved kernel RSS tables */
35 #define MRVL_MUSDK_RSS_RESERVED 0x01
36 /* maximum number of available hifs */
37 #define MRVL_MUSDK_HIFS_MAX 9
40 #define MRVL_MUSDK_PREFETCH_SHIFT 2
42 /* TCAM has 25 entries reserved for uc/mc filter entries */
43 #define MRVL_MAC_ADDRS_MAX 25
44 #define MRVL_MATCH_LEN 16
45 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
46 /* Maximum allowable packet size */
47 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
49 #define MRVL_IFACE_NAME_ARG "iface"
50 #define MRVL_CFG_ARG "cfg"
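/*
 * A minimal usage sketch (interface names and config path are purely
 * illustrative, not taken from this file): the PMD is instantiated via
 * EAL vdev arguments, e.g.
 *   --vdev=net_mvpp2,iface=eth0,iface=eth2,cfg=/path/to/qos.conf
 */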
52 #define MRVL_BURST_SIZE 64
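/* Length of an ARP payload for IPv4 over Ethernet: 28 bytes */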
54 #define MRVL_ARP_LENGTH 28
56 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
58 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
59 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
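/*
 * MUSDK cookies (pp2_cookie_t) are narrower than a 64-bit virtual address,
 * so the driver assumes all mbufs share the same upper address bits,
 * caches them once in cookie_addr_high and ORs them back with the cookie,
 * e.g. mbuf = (struct rte_mbuf *)(cookie_addr_high | buff.cookie), to
 * reconstruct the mbuf pointer on the receive and release paths.
 */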
61 /** Port Rx offload capabilities */
62 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
63 DEV_RX_OFFLOAD_JUMBO_FRAME | \
64 DEV_RX_OFFLOAD_CHECKSUM)
66 /** Port Tx offload capabilities */
67 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
68 DEV_TX_OFFLOAD_UDP_CKSUM | \
69 DEV_TX_OFFLOAD_TCP_CKSUM)
71 static const char * const valid_args[] = {
77 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
78 static struct pp2_hif *hifs[RTE_MAX_LCORE];
79 static int used_bpools[PP2_NUM_PKT_PROC] = {
80 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
83 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
84 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
85 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
90 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
95 * To use buffer harvesting based on the loopback port, a shadow queue
96 * structure was introduced for buffer information bookkeeping.
98 * Before sending a packet, the related buffer information (pp2_buff_inf)
99 * is stored in the shadow queue. After the packet is transmitted, the
100 * no longer used buffer is released back to its original hardware pool,
101 * provided it originated from an interface.
102 * In case it was generated by the application itself, i.e. the mbuf->port
103 * field is 0xff, it is released to the software mempool instead.
105 struct mrvl_shadow_txq {
106 int head; /* write index - used when sending buffers */
107 int tail; /* read index - used when releasing buffers */
108 u16 size; /* queue occupied size */
109 u16 num_to_release; /* number of buffers sent, that can be released */
110 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
114 struct mrvl_priv *priv;
115 struct rte_mempool *mp;
124 struct mrvl_priv *priv;
128 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
129 int tx_deferred_start;
132 static int mrvl_lcore_first;
133 static int mrvl_lcore_last;
134 static int mrvl_dev_num;
136 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
137 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
138 struct pp2_hif *hif, unsigned int core_id,
139 struct mrvl_shadow_txq *sq, int qid, int force);
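/*
 * Generate a single xstats table entry: the pp2_ppio_statistics member
 * name as a string, its byte offset within the structure and its size.
 * The (struct pp2_ppio_statistics *)0 cast is the usual null-pointer
 * sizeof trick; the pointer is never dereferenced.
 */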
141 #define MRVL_XSTATS_TBL_ENTRY(name) { \
142 #name, offsetof(struct pp2_ppio_statistics, name), \
143 sizeof(((struct pp2_ppio_statistics *)0)->name) \
146 /* Table with xstats data */
151 } mrvl_xstats_tbl[] = {
152 MRVL_XSTATS_TBL_ENTRY(rx_bytes),
153 MRVL_XSTATS_TBL_ENTRY(rx_packets),
154 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
155 MRVL_XSTATS_TBL_ENTRY(rx_errors),
156 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
157 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
158 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
159 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
160 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
161 MRVL_XSTATS_TBL_ENTRY(tx_bytes),
162 MRVL_XSTATS_TBL_ENTRY(tx_packets),
163 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
164 MRVL_XSTATS_TBL_ENTRY(tx_errors)
168 mrvl_get_bpool_size(int pp2_id, int pool_id)
173 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
174 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
180 mrvl_reserve_bit(int *bitmap, int max)
182 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
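/*
 * n is the index of the first bit above the highest set one. Since the
 * low bits are pre-reserved (e.g. used_hifs starts off as
 * MRVL_MUSDK_HIFS_RESERVED = 0x0F), this is the next free resource.
 */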
193 mrvl_init_hif(int core_id)
195 struct pp2_hif_params params;
196 char match[MRVL_MATCH_LEN];
199 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
201 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
205 snprintf(match, sizeof(match), "hif-%d", ret);
206 memset(&params, 0, sizeof(params));
207 params.match = match;
208 params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
209 ret = pp2_hif_init(&params, &hifs[core_id]);
211 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
218 static inline struct pp2_hif*
219 mrvl_get_hif(struct mrvl_priv *priv, int core_id)
223 if (likely(hifs[core_id] != NULL))
224 return hifs[core_id];
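/*
 * Slow path: take the per-port lock and initialize a hif for this lcore,
 * widening the [mrvl_lcore_first, mrvl_lcore_last] range that is used
 * elsewhere to iterate over per-lcore resources.
 */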
226 rte_spinlock_lock(&priv->lock);
228 ret = mrvl_init_hif(core_id);
230 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
234 if (core_id < mrvl_lcore_first)
235 mrvl_lcore_first = core_id;
237 if (core_id > mrvl_lcore_last)
238 mrvl_lcore_last = core_id;
240 rte_spinlock_unlock(&priv->lock);
242 return hifs[core_id];
246 * Configure RSS based on the DPDK RSS configuration.
249 * Pointer to private structure.
251 * Pointer to RSS configuration.
254 * 0 on success, negative error value otherwise.
257 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
259 if (rss_conf->rss_key)
260 MRVL_LOG(WARNING, "Changing hash key is not supported");
262 if (rss_conf->rss_hf == 0) {
263 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
264 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
265 priv->ppio_params.inqs_params.hash_type =
266 PP2_PPIO_HASH_T_2_TUPLE;
267 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
268 priv->ppio_params.inqs_params.hash_type =
269 PP2_PPIO_HASH_T_5_TUPLE;
270 priv->rss_hf_tcp = 1;
271 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
272 priv->ppio_params.inqs_params.hash_type =
273 PP2_PPIO_HASH_T_5_TUPLE;
274 priv->rss_hf_tcp = 0;
283 * Ethernet device configuration.
285 * Prepare the driver for a given number of TX and RX queues and
289 * Pointer to Ethernet device structure.
292 * 0 on success, negative error value otherwise.
295 mrvl_dev_configure(struct rte_eth_dev *dev)
297 struct mrvl_priv *priv = dev->data->dev_private;
301 MRVL_LOG(INFO, "Device reconfiguration is not supported");
305 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
306 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
307 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
308 dev->data->dev_conf.rxmode.mq_mode);
312 if (dev->data->dev_conf.rxmode.split_hdr_size) {
313 MRVL_LOG(INFO, "Split headers not supported");
317 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
318 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
319 ETHER_HDR_LEN - ETHER_CRC_LEN;
321 ret = mrvl_configure_rxqs(priv, dev->data->port_id,
322 dev->data->nb_rx_queues);
326 ret = mrvl_configure_txqs(priv, dev->data->port_id,
327 dev->data->nb_tx_queues);
331 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
332 priv->ppio_params.maintain_stats = 1;
333 priv->nb_rx_queues = dev->data->nb_rx_queues;
335 ret = mrvl_tm_init(dev);
339 if (dev->data->nb_rx_queues == 1 &&
340 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
341 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
342 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
347 return mrvl_configure_rss(priv,
348 &dev->data->dev_conf.rx_adv_conf.rss_conf);
352 * DPDK callback to change the MTU.
354 * Setting the MTU affects the hardware MRU (packets larger than the MRU are dropped).
358 * Pointer to Ethernet device structure.
363 * 0 on success, negative error value otherwise.
366 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
368 struct mrvl_priv *priv = dev->data->dev_private;
369 /* extra MV_MH_SIZE bytes are required for Marvell tag */
370 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
373 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
379 ret = pp2_ppio_set_mru(priv->ppio, mru);
383 return pp2_ppio_set_mtu(priv->ppio, mtu);
387 * DPDK callback to bring the link up.
390 * Pointer to Ethernet device structure.
393 * 0 on success, negative error value otherwise.
396 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
398 struct mrvl_priv *priv = dev->data->dev_private;
404 ret = pp2_ppio_enable(priv->ppio);
409 * mtu/mru can be updated only after pp2_ppio_enable() has been called at
410 * least once, as pp2_ppio_enable() changes port->t_mode from the default
411 * 0 to PP2_TRAFFIC_INGRESS_EGRESS.
413 * Set mtu to the default DPDK value here.
415 ret = mrvl_mtu_set(dev, dev->data->mtu);
417 pp2_ppio_disable(priv->ppio);
423 * DPDK callback to bring the link down.
426 * Pointer to Ethernet device structure.
429 * 0 on success, negative error value otherwise.
432 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
434 struct mrvl_priv *priv = dev->data->dev_private;
439 return pp2_ppio_disable(priv->ppio);
443 * DPDK callback to start tx queue.
446 * Pointer to Ethernet device structure.
448 * Transmit queue index.
451 * 0 on success, negative error value otherwise.
454 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
456 struct mrvl_priv *priv = dev->data->dev_private;
462 /* passing 1 enables given tx queue */
463 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
465 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
469 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
475 * DPDK callback to stop tx queue.
478 * Pointer to Ethernet device structure.
480 * Transmit queue index.
483 * 0 on success, negative error value otherwise.
486 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
488 struct mrvl_priv *priv = dev->data->dev_private;
494 /* passing 0 disables given tx queue */
495 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
497 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
501 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
507 * DPDK callback to start the device.
510 * Pointer to Ethernet device structure.
513 * 0 on success, negative errno value on failure.
516 mrvl_dev_start(struct rte_eth_dev *dev)
518 struct mrvl_priv *priv = dev->data->dev_private;
519 char match[MRVL_MATCH_LEN];
520 int ret = 0, i, def_init_size;
523 return mrvl_dev_set_link_up(dev);
525 snprintf(match, sizeof(match), "ppio-%d:%d",
526 priv->pp_id, priv->ppio_id);
527 priv->ppio_params.match = match;
530 * Calculate the minimum bpool size for the refill feature as follows:
531 * 2 default burst sizes multiplied by the number of rx queues.
532 * If the bpool size drops below this value, new buffers will
533 * be added to the pool.
535 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
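/*
 * Worked example (values derived from the defines above): with 4 rx
 * queues, bpool_min_size = 4 * 64 * 2 = 512 buffers and the default
 * initial size computed below is 512 + 64 * 2 = 640 buffers.
 */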
537 /* In case the initial bpool size configured during queue setup is
538 * smaller than the minimum size, add more buffers
540 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
541 if (priv->bpool_init_size < def_init_size) {
542 int buffs_to_add = def_init_size - priv->bpool_init_size;
544 priv->bpool_init_size += buffs_to_add;
545 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
547 MRVL_LOG(ERR, "Failed to add buffers to bpool");
551 * Calculate the maximum bpool size for the refill feature as follows:
552 * the maximum number of descriptors in an rx queue multiplied by the
553 * number of rx queues, plus the minimum bpool size.
554 * In case the bpool size exceeds this value, superfluous buffers
557 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
558 priv->bpool_min_size;
560 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
562 MRVL_LOG(ERR, "Failed to init ppio");
567 * In case there are some stale uc/mc mac addresses, flush them
568 * here. It cannot be done during mrvl_dev_close() as port information
569 * is already gone at that point (due to pp2_ppio_deinit() in
572 if (!priv->uc_mc_flushed) {
573 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
576 "Failed to flush uc/mc filter list");
579 priv->uc_mc_flushed = 1;
582 if (!priv->vlan_flushed) {
583 ret = pp2_ppio_flush_vlan(priv->ppio);
585 MRVL_LOG(ERR, "Failed to flush vlan list");
588 * once pp2_ppio_flush_vlan() is supported jump to out
592 priv->vlan_flushed = 1;
595 /* For default QoS config, don't start classifier. */
597 ret = mrvl_start_qos_mapping(priv);
599 MRVL_LOG(ERR, "Failed to setup QoS mapping");
604 ret = mrvl_dev_set_link_up(dev);
606 MRVL_LOG(ERR, "Failed to set link up");
610 /* start tx queues */
611 for (i = 0; i < dev->data->nb_tx_queues; i++) {
612 struct mrvl_txq *txq = dev->data->tx_queues[i];
614 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
616 if (!txq->tx_deferred_start)
620 * All txqs are started by default. Stop them
621 * so that tx_deferred_start works as expected.
623 ret = mrvl_tx_queue_stop(dev, i);
633 MRVL_LOG(ERR, "Failed to start device");
634 pp2_ppio_deinit(priv->ppio);
639 * Flush receive queues.
642 * Pointer to Ethernet device structure.
645 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
649 MRVL_LOG(INFO, "Flushing rx queues");
650 for (i = 0; i < dev->data->nb_rx_queues; i++) {
654 struct mrvl_rxq *q = dev->data->rx_queues[i];
655 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
657 num = MRVL_PP2_RXD_MAX;
658 ret = pp2_ppio_recv(q->priv->ppio,
659 q->priv->rxq_map[q->queue_id].tc,
660 q->priv->rxq_map[q->queue_id].inq,
661 descs, (uint16_t *)&num);
662 } while (ret == 0 && num);
667 * Flush transmit shadow queues.
670 * Pointer to Ethernet device structure.
673 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
676 struct mrvl_txq *txq;
678 MRVL_LOG(INFO, "Flushing tx shadow queues");
679 for (i = 0; i < dev->data->nb_tx_queues; i++) {
680 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
682 for (j = 0; j < RTE_MAX_LCORE; j++) {
683 struct mrvl_shadow_txq *sq;
688 sq = &txq->shadow_txqs[j];
689 mrvl_free_sent_buffers(txq->priv->ppio,
690 hifs[j], j, sq, txq->queue_id, 1);
691 while (sq->tail != sq->head) {
692 uint64_t addr = cookie_addr_high |
693 sq->ent[sq->tail].buff.cookie;
695 (struct rte_mbuf *)addr);
696 sq->tail = (sq->tail + 1) &
697 MRVL_PP2_TX_SHADOWQ_MASK;
699 memset(sq, 0, sizeof(*sq));
705 * Flush hardware bpool (buffer-pool).
708 * Pointer to Ethernet device structure.
711 mrvl_flush_bpool(struct rte_eth_dev *dev)
713 struct mrvl_priv *priv = dev->data->dev_private;
717 unsigned int core_id = rte_lcore_id();
719 if (core_id == LCORE_ID_ANY)
722 hif = mrvl_get_hif(priv, core_id);
724 ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
726 MRVL_LOG(ERR, "Failed to get bpool buffers number");
731 struct pp2_buff_inf inf;
734 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
738 addr = cookie_addr_high | inf.cookie;
739 rte_pktmbuf_free((struct rte_mbuf *)addr);
744 * DPDK callback to stop the device.
747 * Pointer to Ethernet device structure.
750 mrvl_dev_stop(struct rte_eth_dev *dev)
752 mrvl_dev_set_link_down(dev);
756 * DPDK callback to close the device.
759 * Pointer to Ethernet device structure.
762 mrvl_dev_close(struct rte_eth_dev *dev)
764 struct mrvl_priv *priv = dev->data->dev_private;
767 mrvl_flush_rx_queues(dev);
768 mrvl_flush_tx_shadow_queues(dev);
769 mrvl_flow_deinit(dev);
770 mrvl_mtr_deinit(dev);
772 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
773 struct pp2_ppio_tc_params *tc_params =
774 &priv->ppio_params.inqs_params.tcs_params[i];
776 if (tc_params->inqs_params) {
777 rte_free(tc_params->inqs_params);
778 tc_params->inqs_params = NULL;
783 pp2_cls_tbl_deinit(priv->cls_tbl);
784 priv->cls_tbl = NULL;
788 pp2_cls_qos_tbl_deinit(priv->qos_tbl);
789 priv->qos_tbl = NULL;
792 mrvl_flush_bpool(dev);
796 pp2_ppio_deinit(priv->ppio);
800 /* policer must be released after ppio deinitialization */
801 if (priv->default_policer) {
802 pp2_cls_plcr_deinit(priv->default_policer);
803 priv->default_policer = NULL;
808 * DPDK callback to retrieve physical link information.
811 * Pointer to Ethernet device structure.
812 * @param wait_to_complete
813 * Wait for request completion (ignored).
816 * 0 on success, negative error value otherwise.
819 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
823 * once MUSDK provides the necessary API, use it here
825 struct mrvl_priv *priv = dev->data->dev_private;
826 struct ethtool_cmd edata;
828 int ret, fd, link_up;
833 edata.cmd = ETHTOOL_GSET;
835 strcpy(req.ifr_name, dev->data->name);
836 req.ifr_data = (void *)&edata;
838 fd = socket(AF_INET, SOCK_DGRAM, 0);
842 ret = ioctl(fd, SIOCETHTOOL, &req);
850 switch (ethtool_cmd_speed(&edata)) {
852 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
855 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
858 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
861 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
864 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
867 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
868 ETH_LINK_HALF_DUPLEX;
869 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
871 pp2_ppio_get_link_state(priv->ppio, &link_up);
872 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
878 * DPDK callback to enable promiscuous mode.
881 * Pointer to Ethernet device structure.
884 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
886 struct mrvl_priv *priv = dev->data->dev_private;
895 ret = pp2_ppio_set_promisc(priv->ppio, 1);
897 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
901 * DPDK callback to enable allmulti mode.
904 * Pointer to Ethernet device structure.
907 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
909 struct mrvl_priv *priv = dev->data->dev_private;
918 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
920 MRVL_LOG(ERR, "Failed enable all-multicast mode");
924 * DPDK callback to disable promiscuous mode.
927 * Pointer to Ethernet device structure.
930 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
932 struct mrvl_priv *priv = dev->data->dev_private;
938 ret = pp2_ppio_set_promisc(priv->ppio, 0);
940 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
944 * DPDK callback to disable allmulticast mode.
947 * Pointer to Ethernet device structure.
950 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
952 struct mrvl_priv *priv = dev->data->dev_private;
958 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
960 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
964 * DPDK callback to remove a MAC address.
967 * Pointer to Ethernet device structure.
972 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
974 struct mrvl_priv *priv = dev->data->dev_private;
975 char buf[ETHER_ADDR_FMT_SIZE];
984 ret = pp2_ppio_remove_mac_addr(priv->ppio,
985 dev->data->mac_addrs[index].addr_bytes);
987 ether_format_addr(buf, sizeof(buf),
988 &dev->data->mac_addrs[index]);
989 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
994 * DPDK callback to add a MAC address.
997 * Pointer to Ethernet device structure.
999 * MAC address to register.
1001 * MAC address index.
1003 * VMDq pool index to associate address with (unused).
1006 * 0 on success, negative error value otherwise.
1009 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1010 uint32_t index, uint32_t vmdq __rte_unused)
1012 struct mrvl_priv *priv = dev->data->dev_private;
1013 char buf[ETHER_ADDR_FMT_SIZE];
1020 /* For setting index 0, mrvl_mac_addr_set() should be used. */
1027 * The maximum number of uc addresses can be tuned via the mvpp2x kernel
1028 * module parameter uc_filter_max. The maximum number of mc addresses is
1029 * then MRVL_MAC_ADDRS_MAX - uc_filter_max. uc_filter_max defaults to 4,
1030 * which leaves MRVL_MAC_ADDRS_MAX - 4 = 21 entries for mc addresses.
1032 * If more than uc_filter_max uc addresses are added to the filter list,
1033 * the NIC switches to promiscuous mode automatically.
1035 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are added
1036 * to the filter list, the NIC switches to all-multicast mode
1039 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1041 ether_format_addr(buf, sizeof(buf), mac_addr);
1042 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1050 * DPDK callback to set the primary MAC address.
1053 * Pointer to Ethernet device structure.
1055 * MAC address to register.
1058 * 0 on success, negative error value otherwise.
1061 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1063 struct mrvl_priv *priv = dev->data->dev_private;
1072 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1074 char buf[ETHER_ADDR_FMT_SIZE];
1075 ether_format_addr(buf, sizeof(buf), mac_addr);
1076 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1083 * DPDK callback to get device statistics.
1086 * Pointer to Ethernet device structure.
1088 * Stats structure output buffer.
1091 * 0 on success, negative error value otherwise.
1094 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1096 struct mrvl_priv *priv = dev->data->dev_private;
1097 struct pp2_ppio_statistics ppio_stats;
1098 uint64_t drop_mac = 0;
1099 unsigned int i, idx, ret;
1104 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1105 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1106 struct pp2_ppio_inq_statistics rx_stats;
1111 idx = rxq->queue_id;
1112 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1114 "rx queue %d stats out of range (0 - %d)",
1115 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1119 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1120 priv->rxq_map[idx].tc,
1121 priv->rxq_map[idx].inq,
1123 if (unlikely(ret)) {
1125 "Failed to update rx queue %d stats", idx);
1129 stats->q_ibytes[idx] = rxq->bytes_recv;
1130 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1131 stats->q_errors[idx] = rx_stats.drop_early +
1132 rx_stats.drop_fullq +
1135 stats->ibytes += rxq->bytes_recv;
1136 drop_mac += rxq->drop_mac;
1139 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1140 struct mrvl_txq *txq = dev->data->tx_queues[i];
1141 struct pp2_ppio_outq_statistics tx_stats;
1146 idx = txq->queue_id;
1147 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1149 "tx queue %d stats out of range (0 - %d)",
1150 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1153 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1155 if (unlikely(ret)) {
1157 "Failed to update tx queue %d stats", idx);
1161 stats->q_opackets[idx] = tx_stats.deq_desc;
1162 stats->q_obytes[idx] = txq->bytes_sent;
1163 stats->obytes += txq->bytes_sent;
1166 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1167 if (unlikely(ret)) {
1168 MRVL_LOG(ERR, "Failed to update port statistics");
1172 stats->ipackets += ppio_stats.rx_packets - drop_mac;
1173 stats->opackets += ppio_stats.tx_packets;
1174 stats->imissed += ppio_stats.rx_fullq_dropped +
1175 ppio_stats.rx_bm_dropped +
1176 ppio_stats.rx_early_dropped +
1177 ppio_stats.rx_fifo_dropped +
1178 ppio_stats.rx_cls_dropped;
1179 stats->ierrors = drop_mac;
1185 * DPDK callback to clear device statistics.
1188 * Pointer to Ethernet device structure.
1191 mrvl_stats_reset(struct rte_eth_dev *dev)
1193 struct mrvl_priv *priv = dev->data->dev_private;
1199 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1200 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1202 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1203 priv->rxq_map[i].inq, NULL, 1);
1204 rxq->bytes_recv = 0;
1208 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1209 struct mrvl_txq *txq = dev->data->tx_queues[i];
1211 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1212 txq->bytes_sent = 0;
1215 pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1219 * DPDK callback to get extended statistics.
1222 * Pointer to Ethernet device structure.
1224 * Pointer to xstats table.
1226 * Number of entries in xstats table.
1228 * Negative value on error, number of read xstats otherwise.
1231 mrvl_xstats_get(struct rte_eth_dev *dev,
1232 struct rte_eth_xstat *stats, unsigned int n)
1234 struct mrvl_priv *priv = dev->data->dev_private;
1235 struct pp2_ppio_statistics ppio_stats;
1241 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1242 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1245 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1246 val = *(uint32_t *)((uint8_t *)&ppio_stats +
1247 mrvl_xstats_tbl[i].offset);
1248 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1249 val = *(uint64_t *)((uint8_t *)&ppio_stats +
1250 mrvl_xstats_tbl[i].offset);
1255 stats[i].value = val;
1262 * DPDK callback to reset extended statistics.
1265 * Pointer to Ethernet device structure.
1268 mrvl_xstats_reset(struct rte_eth_dev *dev)
1270 mrvl_stats_reset(dev);
1274 * DPDK callback to get extended statistics names.
1276 * @param dev (unused)
1277 * Pointer to Ethernet device structure.
1278 * @param xstats_names
1279 * Pointer to xstats names table.
1281 * Size of the xstats names table.
1283 * Number of read names.
1286 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1287 struct rte_eth_xstat_name *xstats_names,
1293 return RTE_DIM(mrvl_xstats_tbl);
1295 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
1296 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
1297 mrvl_xstats_tbl[i].name);
1303 * DPDK callback to get information about the device.
1306 * Pointer to Ethernet device structure (unused).
1308 * Info structure output buffer.
1311 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
1312 struct rte_eth_dev_info *info)
1314 info->speed_capa = ETH_LINK_SPEED_10M |
1315 ETH_LINK_SPEED_100M |
1319 info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1320 info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1321 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1323 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1324 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1325 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1327 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1328 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1329 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1331 info->rx_offload_capa = MRVL_RX_OFFLOADS;
1332 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1334 info->tx_offload_capa = MRVL_TX_OFFLOADS;
1335 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1337 info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1338 ETH_RSS_NONFRAG_IPV4_TCP |
1339 ETH_RSS_NONFRAG_IPV4_UDP;
1341 /* By default packets are dropped if no descriptors are available */
1342 info->default_rxconf.rx_drop_en = 1;
1344 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1348 * Return supported packet types.
1351 * Pointer to Ethernet device structure (unused).
1354 * Const pointer to the table with supported packet types.
1356 static const uint32_t *
1357 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1359 static const uint32_t ptypes[] = {
1361 RTE_PTYPE_L2_ETHER_VLAN,
1362 RTE_PTYPE_L2_ETHER_QINQ,
1364 RTE_PTYPE_L3_IPV4_EXT,
1365 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1367 RTE_PTYPE_L3_IPV6_EXT,
1368 RTE_PTYPE_L2_ETHER_ARP,
1377 * DPDK callback to get information about specific receive queue.
1380 * Pointer to Ethernet device structure.
1381 * @param rx_queue_id
1382 * Receive queue index.
1384 * Receive queue information structure.
1386 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1387 struct rte_eth_rxq_info *qinfo)
1389 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1390 struct mrvl_priv *priv = dev->data->dev_private;
1391 int inq = priv->rxq_map[rx_queue_id].inq;
1392 int tc = priv->rxq_map[rx_queue_id].tc;
1393 struct pp2_ppio_tc_params *tc_params =
1394 &priv->ppio_params.inqs_params.tcs_params[tc];
1397 qinfo->nb_desc = tc_params->inqs_params[inq].size;
1401 * DPDK callback to get information about specific transmit queue.
1404 * Pointer to Ethernet device structure.
1405 * @param tx_queue_id
1406 * Transmit queue index.
1408 * Transmit queue information structure.
1410 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1411 struct rte_eth_txq_info *qinfo)
1413 struct mrvl_priv *priv = dev->data->dev_private;
1414 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1417 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1418 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1422 * DPDK callback to configure a VLAN filter.
1425 * Pointer to Ethernet device structure.
1427 * VLAN ID to filter.
1432 * 0 on success, negative error value otherwise.
1435 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1437 struct mrvl_priv *priv = dev->data->dev_private;
1445 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1446 pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1450 * Release buffers to hardware bpool (buffer-pool)
1453 * Receive queue pointer.
1455 * Number of buffers to release to bpool.
1458 * 0 on success, negative error value otherwise.
1461 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1463 struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
1464 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
1466 unsigned int core_id;
1467 struct pp2_hif *hif;
1468 struct pp2_bpool *bpool;
1470 core_id = rte_lcore_id();
1471 if (core_id == LCORE_ID_ANY)
1474 hif = mrvl_get_hif(rxq->priv, core_id);
1478 bpool = rxq->priv->bpool;
1480 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1484 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1486 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1488 for (i = 0; i < num; i++) {
1489 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1490 != cookie_addr_high) {
1492 "mbuf virtual addr high 0x%lx out of range",
1493 (uint64_t)mbufs[i] >> 32);
1497 entries[i].buff.addr =
1498 rte_mbuf_data_iova_default(mbufs[i]);
1499 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
1500 entries[i].bpool = bpool;
1503 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1504 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
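/*
 * pp2_bpool_put_buffs() is assumed to update i in place to the number of
 * buffers actually accepted by the pool (leftovers are freed below);
 * account them per-core so mrvl_get_bpool_size() can sum the pool
 * occupancy over all lcores.
 */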
1511 for (; i < num; i++)
1512 rte_pktmbuf_free(mbufs[i]);
1518 * DPDK callback to configure the receive queue.
1521 * Pointer to Ethernet device structure.
1525 * Number of descriptors to configure in queue.
1527 * NUMA socket on which memory must be allocated.
1529 * Thresholds parameters.
1531 * Memory pool for buffer allocations.
1534 * 0 on success, negative error value otherwise.
1537 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1538 unsigned int socket,
1539 const struct rte_eth_rxconf *conf,
1540 struct rte_mempool *mp)
1542 struct mrvl_priv *priv = dev->data->dev_private;
1543 struct mrvl_rxq *rxq;
1545 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1549 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1551 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1553 * Unknown TC mapping; the queue cannot be mapped correctly.
1555 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1556 idx, priv->ppio_id);
1560 min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
1561 MRVL_PKT_EFFEC_OFFS;
1562 if (min_size < max_rx_pkt_len) {
1564 "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
1565 max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
1566 MRVL_PKT_EFFEC_OFFS,
1571 if (dev->data->rx_queues[idx]) {
1572 rte_free(dev->data->rx_queues[idx]);
1573 dev->data->rx_queues[idx] = NULL;
1576 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
1582 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
1583 rxq->queue_id = idx;
1584 rxq->port_id = dev->data->port_id;
1585 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
1587 tc = priv->rxq_map[rxq->queue_id].tc,
1588 inq = priv->rxq_map[rxq->queue_id].inq;
1589 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
1592 ret = mrvl_fill_bpool(rxq, desc);
1598 priv->bpool_init_size += desc;
1600 dev->data->rx_queues[idx] = rxq;
1606 * DPDK callback to release the receive queue.
1609 * Generic receive queue pointer.
1612 mrvl_rx_queue_release(void *rxq)
1614 struct mrvl_rxq *q = rxq;
1615 struct pp2_ppio_tc_params *tc_params;
1616 int i, num, tc, inq;
1617 struct pp2_hif *hif;
1618 unsigned int core_id = rte_lcore_id();
1620 if (core_id == LCORE_ID_ANY)
1626 hif = mrvl_get_hif(q->priv, core_id);
1631 tc = q->priv->rxq_map[q->queue_id].tc;
1632 inq = q->priv->rxq_map[q->queue_id].inq;
1633 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
1634 num = tc_params->inqs_params[inq].size;
1635 for (i = 0; i < num; i++) {
1636 struct pp2_buff_inf inf;
1639 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
1640 addr = cookie_addr_high | inf.cookie;
1641 rte_pktmbuf_free((struct rte_mbuf *)addr);
1648 * DPDK callback to configure the transmit queue.
1651 * Pointer to Ethernet device structure.
1653 * Transmit queue index.
1655 * Number of descriptors to configure in the queue.
1657 * NUMA socket on which memory must be allocated.
1659 * Tx queue configuration parameters.
1662 * 0 on success, negative error value otherwise.
1665 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1666 unsigned int socket,
1667 const struct rte_eth_txconf *conf)
1669 struct mrvl_priv *priv = dev->data->dev_private;
1670 struct mrvl_txq *txq;
1672 if (dev->data->tx_queues[idx]) {
1673 rte_free(dev->data->tx_queues[idx]);
1674 dev->data->tx_queues[idx] = NULL;
1677 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
1682 txq->queue_id = idx;
1683 txq->port_id = dev->data->port_id;
1684 txq->tx_deferred_start = conf->tx_deferred_start;
1685 dev->data->tx_queues[idx] = txq;
1687 priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
1693 * DPDK callback to release the transmit queue.
1696 * Generic transmit queue pointer.
1699 mrvl_tx_queue_release(void *txq)
1701 struct mrvl_txq *q = txq;
1710 * DPDK callback to get flow control configuration.
1713 * Pointer to Ethernet device structure.
1715 * Pointer to the flow control configuration.
1718 * 0 on success, negative error value otherwise.
1721 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1723 struct mrvl_priv *priv = dev->data->dev_private;
1729 ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
1731 MRVL_LOG(ERR, "Failed to read rx pause state");
1735 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
1741 * DPDK callback to set flow control configuration.
1744 * Pointer to Ethernet device structure.
1746 * Pointer to the flow control configuration.
1749 * 0 on success, negative error value otherwise.
1752 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1754 struct mrvl_priv *priv = dev->data->dev_private;
1759 if (fc_conf->high_water ||
1760 fc_conf->low_water ||
1761 fc_conf->pause_time ||
1762 fc_conf->mac_ctrl_frame_fwd ||
1764 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
1769 if (fc_conf->mode == RTE_FC_NONE ||
1770 fc_conf->mode == RTE_FC_RX_PAUSE) {
1773 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
1774 ret = pp2_ppio_set_rx_pause(priv->ppio, en);
1777 "Failed to change flowctrl on RX side");
1786 * Update RSS hash configuration
1789 * Pointer to Ethernet device structure.
1791 * Pointer to RSS configuration.
1794 * 0 on success, negative error value otherwise.
1797 mrvl_rss_hash_update(struct rte_eth_dev *dev,
1798 struct rte_eth_rss_conf *rss_conf)
1800 struct mrvl_priv *priv = dev->data->dev_private;
1805 return mrvl_configure_rss(priv, rss_conf);
1809 * DPDK callback to get RSS hash configuration.
1812 * Pointer to Ethernet device structure.
1814 * Pointer to RSS configuration.
1820 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
1821 struct rte_eth_rss_conf *rss_conf)
1823 struct mrvl_priv *priv = dev->data->dev_private;
1824 enum pp2_ppio_hash_type hash_type =
1825 priv->ppio_params.inqs_params.hash_type;
1827 rss_conf->rss_key = NULL;
1829 if (hash_type == PP2_PPIO_HASH_T_NONE)
1830 rss_conf->rss_hf = 0;
1831 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
1832 rss_conf->rss_hf = ETH_RSS_IPV4;
1833 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
1834 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
1835 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
1836 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
1842 * DPDK callback to get rte_flow callbacks.
1845 * Pointer to the device structure.
1849 * Flow filter operation.
1851 * Pointer to pass the flow ops.
1854 * 0 on success, negative error value otherwise.
1857 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
1858 enum rte_filter_type filter_type,
1859 enum rte_filter_op filter_op, void *arg)
1861 switch (filter_type) {
1862 case RTE_ETH_FILTER_GENERIC:
1863 if (filter_op != RTE_ETH_FILTER_GET)
1865 *(const void **)arg = &mrvl_flow_ops;
1868 MRVL_LOG(WARNING, "Filter type (%d) not supported",
1875 * DPDK callback to get rte_mtr callbacks.
1878 * Pointer to the device structure.
1880 * Pointer to pass the mtr ops.
1886 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1888 *(const void **)ops = &mrvl_mtr_ops;
1894 * DPDK callback to get rte_tm callbacks.
1897 * Pointer to the device structure.
1899 * Pointer to pass the tm ops.
1905 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1907 *(const void **)ops = &mrvl_tm_ops;
1912 static const struct eth_dev_ops mrvl_ops = {
1913 .dev_configure = mrvl_dev_configure,
1914 .dev_start = mrvl_dev_start,
1915 .dev_stop = mrvl_dev_stop,
1916 .dev_set_link_up = mrvl_dev_set_link_up,
1917 .dev_set_link_down = mrvl_dev_set_link_down,
1918 .dev_close = mrvl_dev_close,
1919 .link_update = mrvl_link_update,
1920 .promiscuous_enable = mrvl_promiscuous_enable,
1921 .allmulticast_enable = mrvl_allmulticast_enable,
1922 .promiscuous_disable = mrvl_promiscuous_disable,
1923 .allmulticast_disable = mrvl_allmulticast_disable,
1924 .mac_addr_remove = mrvl_mac_addr_remove,
1925 .mac_addr_add = mrvl_mac_addr_add,
1926 .mac_addr_set = mrvl_mac_addr_set,
1927 .mtu_set = mrvl_mtu_set,
1928 .stats_get = mrvl_stats_get,
1929 .stats_reset = mrvl_stats_reset,
1930 .xstats_get = mrvl_xstats_get,
1931 .xstats_reset = mrvl_xstats_reset,
1932 .xstats_get_names = mrvl_xstats_get_names,
1933 .dev_infos_get = mrvl_dev_infos_get,
1934 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
1935 .rxq_info_get = mrvl_rxq_info_get,
1936 .txq_info_get = mrvl_txq_info_get,
1937 .vlan_filter_set = mrvl_vlan_filter_set,
1938 .tx_queue_start = mrvl_tx_queue_start,
1939 .tx_queue_stop = mrvl_tx_queue_stop,
1940 .rx_queue_setup = mrvl_rx_queue_setup,
1941 .rx_queue_release = mrvl_rx_queue_release,
1942 .tx_queue_setup = mrvl_tx_queue_setup,
1943 .tx_queue_release = mrvl_tx_queue_release,
1944 .flow_ctrl_get = mrvl_flow_ctrl_get,
1945 .flow_ctrl_set = mrvl_flow_ctrl_set,
1946 .rss_hash_update = mrvl_rss_hash_update,
1947 .rss_hash_conf_get = mrvl_rss_hash_conf_get,
1948 .filter_ctrl = mrvl_eth_filter_ctrl,
1949 .mtr_ops_get = mrvl_mtr_ops_get,
1950 .tm_ops_get = mrvl_tm_ops_get,
1954 * Return packet type information and l3/l4 offsets.
1957 * Pointer to the received packet descriptor.
1964 * Packet type information.
1966 static inline uint64_t
1967 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
1968 uint8_t *l3_offset, uint8_t *l4_offset)
1970 enum pp2_inq_l3_type l3_type;
1971 enum pp2_inq_l4_type l4_type;
1972 enum pp2_inq_vlan_tag vlan_tag;
1973 uint64_t packet_type;
1975 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
1976 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
1977 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
1979 packet_type = RTE_PTYPE_L2_ETHER;
1982 case PP2_INQ_VLAN_TAG_SINGLE:
1983 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1985 case PP2_INQ_VLAN_TAG_DOUBLE:
1986 case PP2_INQ_VLAN_TAG_TRIPLE:
1987 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
1994 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
1995 packet_type |= RTE_PTYPE_L3_IPV4;
1997 case PP2_INQ_L3_TYPE_IPV4_OK:
1998 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2000 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2001 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2003 case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2004 packet_type |= RTE_PTYPE_L3_IPV6;
2006 case PP2_INQ_L3_TYPE_IPV6_EXT:
2007 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2009 case PP2_INQ_L3_TYPE_ARP:
2010 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2012 * In case of ARP, l4_offset is set to a wrong value.
2013 * Set it to a proper one so that later on mbuf->l3_len can be
2014 * calculated by subtracting l3_offset from l4_offset.
2016 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2019 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
2024 case PP2_INQ_L4_TYPE_TCP:
2025 packet_type |= RTE_PTYPE_L4_TCP;
2027 case PP2_INQ_L4_TYPE_UDP:
2028 packet_type |= RTE_PTYPE_L4_UDP;
2031 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
2039 * Get offload information from the received packet descriptor.
2042 * Pointer to the received packet descriptor.
2045 * Mbuf offload flags.
2047 static inline uint64_t
2048 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2051 enum pp2_inq_desc_status status;
2053 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2054 if (unlikely(status != PP2_DESC_ERR_OK))
2055 flags = PKT_RX_IP_CKSUM_BAD;
2057 flags = PKT_RX_IP_CKSUM_GOOD;
2059 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2060 if (unlikely(status != PP2_DESC_ERR_OK))
2061 flags |= PKT_RX_L4_CKSUM_BAD;
2063 flags |= PKT_RX_L4_CKSUM_GOOD;
2069 * DPDK callback for receive.
2072 * Generic pointer to the receive queue.
2074 * Array to store received packets.
2076 * Maximum number of packets in array.
2079 * Number of packets successfully received.
2082 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2084 struct mrvl_rxq *q = rxq;
2085 struct pp2_ppio_desc descs[nb_pkts];
2086 struct pp2_bpool *bpool;
2087 int i, ret, rx_done = 0;
2089 struct pp2_hif *hif;
2090 unsigned int core_id = rte_lcore_id();
2092 hif = mrvl_get_hif(q->priv, core_id);
2094 if (unlikely(!q->priv->ppio || !hif))
2097 bpool = q->priv->bpool;
2099 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2100 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2101 if (unlikely(ret < 0)) {
2102 MRVL_LOG(ERR, "Failed to receive packets");
2105 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2107 for (i = 0; i < nb_pkts; i++) {
2108 struct rte_mbuf *mbuf;
2109 uint8_t l3_offset, l4_offset;
2110 enum pp2_inq_desc_status status;
2113 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2114 struct pp2_ppio_desc *pref_desc;
2117 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2118 pref_addr = cookie_addr_high |
2119 pp2_ppio_inq_desc_get_cookie(pref_desc);
2120 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2121 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2124 addr = cookie_addr_high |
2125 pp2_ppio_inq_desc_get_cookie(&descs[i]);
2126 mbuf = (struct rte_mbuf *)addr;
2127 rte_pktmbuf_reset(mbuf);
2129 /* drop packet in case of mac, overrun or resource error */
2130 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2131 if (unlikely(status != PP2_DESC_ERR_OK)) {
2132 struct pp2_buff_inf binf = {
2133 .addr = rte_mbuf_data_iova_default(mbuf),
2134 .cookie = (pp2_cookie_t)(uint64_t)mbuf,
2137 pp2_bpool_put_buff(hif, bpool, &binf);
2138 mrvl_port_bpool_size
2139 [bpool->pp2_id][bpool->id][core_id]++;
2144 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2145 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2146 mbuf->data_len = mbuf->pkt_len;
2147 mbuf->port = q->port_id;
2149 mrvl_desc_to_packet_type_and_offset(&descs[i],
2152 mbuf->l2_len = l3_offset;
2153 mbuf->l3_len = l4_offset - l3_offset;
2155 if (likely(q->cksum_enabled))
2156 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
2158 rx_pkts[rx_done++] = mbuf;
2159 q->bytes_recv += mbuf->pkt_len;
2162 if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2163 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
2165 if (unlikely(num <= q->priv->bpool_min_size ||
2166 (!rx_done && num < q->priv->bpool_init_size))) {
2167 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
2169 MRVL_LOG(ERR, "Failed to fill bpool");
2170 } else if (unlikely(num > q->priv->bpool_max_size)) {
2172 int pkt_to_remove = num - q->priv->bpool_init_size;
2173 struct rte_mbuf *mbuf;
2174 struct pp2_buff_inf buff;
2177 "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
2178 bpool->pp2_id, q->priv->ppio->port_id,
2179 bpool->id, pkt_to_remove, num,
2180 q->priv->bpool_init_size);
2182 for (i = 0; i < pkt_to_remove; i++) {
2183 ret = pp2_bpool_get_buff(hif, bpool, &buff);
2186 mbuf = (struct rte_mbuf *)
2187 (cookie_addr_high | buff.cookie);
2188 rte_pktmbuf_free(mbuf);
2190 mrvl_port_bpool_size
2191 [bpool->pp2_id][bpool->id][core_id] -= i;
2193 rte_spinlock_unlock(&q->priv->lock);
2200 * Prepare offload information.
2204 * @param packet_type
2205 * Packet type bitfield.
2207 * Pointer to the pp2_ouq_l3_type structure.
2209 * Pointer to the pp2_outq_l4_type structure.
2210 * @param gen_l3_cksum
2211 * Will be set to 1 in case l3 checksum is computed.
2213 * Will be set to 1 in case l4 checksum is computed.
2216 * 0 on success, negative error value otherwise.
2219 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2220 enum pp2_outq_l3_type *l3_type,
2221 enum pp2_outq_l4_type *l4_type,
2226 * Based on ol_flags, prepare information
2227 * for pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
2230 if (ol_flags & PKT_TX_IPV4) {
2231 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2232 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2233 } else if (ol_flags & PKT_TX_IPV6) {
2234 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2235 /* no checksum for ipv6 header */
2238 /* if it is something different, stop processing */
2242 ol_flags &= PKT_TX_L4_MASK;
2243 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2244 ol_flags == PKT_TX_TCP_CKSUM) {
2245 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2247 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2248 ol_flags == PKT_TX_UDP_CKSUM) {
2249 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2252 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2253 /* no checksum for other type */
2261 * Release already sent buffers to bpool (buffer-pool).
2264 * Pointer to the port structure.
2266 * Pointer to the MUSDK hardware interface.
2268 * Pointer to the shadow queue.
2272 * Force releasing packets.
2275 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2276 unsigned int core_id, struct mrvl_shadow_txq *sq,
2279 struct buff_release_entry *entry;
2280 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2283 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2285 sq->num_to_release += nb_done;
2287 if (likely(!force &&
2288 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2291 nb_done = sq->num_to_release;
2292 sq->num_to_release = 0;
2294 for (i = 0; i < nb_done; i++) {
2295 entry = &sq->ent[sq->tail + num];
2296 if (unlikely(!entry->buff.addr)) {
2298 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2299 sq->tail, (u64)entry->buff.cookie,
2300 (u64)entry->buff.addr);
2305 if (unlikely(!entry->bpool)) {
2306 struct rte_mbuf *mbuf;
2308 mbuf = (struct rte_mbuf *)
2309 (cookie_addr_high | entry->buff.cookie);
2310 rte_pktmbuf_free(mbuf);
2315 mrvl_port_bpool_size
2316 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2318 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2323 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2325 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2332 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2333 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
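/*
 * Note: the put_buffs calls above hand back contiguous runs of shadow
 * queue entries; a run is cut at entries with no bpool (those are freed
 * straight to the mempool instead) and at the ring wrap-around point.
 */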
2339 * DPDK callback for transmit.
2342 * Generic pointer transmit queue.
2344 * Packets to transmit.
2346 * Number of packets in array.
2349 * Number of packets successfully transmitted.
2352 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2354 struct mrvl_txq *q = txq;
2355 struct mrvl_shadow_txq *sq;
2356 struct pp2_hif *hif;
2357 struct pp2_ppio_desc descs[nb_pkts];
2358 unsigned int core_id = rte_lcore_id();
2359 int i, ret, bytes_sent = 0;
2360 uint16_t num, sq_free_size;
2363 hif = mrvl_get_hif(q->priv, core_id);
2364 sq = &q->shadow_txqs[core_id];
2366 if (unlikely(!q->priv->ppio || !hif))
2370 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2371 sq, q->queue_id, 0);
2373 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
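/*
 * One slot is left unused (hence the -1), presumably as a safety margin
 * so that head never fully catches up with tail.
 */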
2374 if (unlikely(nb_pkts > sq_free_size)) {
2376 "No room in shadow queue for %d packets! %d packets will be sent.",
2377 nb_pkts, sq_free_size);
2378 nb_pkts = sq_free_size;
2381 for (i = 0; i < nb_pkts; i++) {
2382 struct rte_mbuf *mbuf = tx_pkts[i];
2383 int gen_l3_cksum, gen_l4_cksum;
2384 enum pp2_outq_l3_type l3_type;
2385 enum pp2_outq_l4_type l4_type;
2387 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2388 struct rte_mbuf *pref_pkt_hdr;
2390 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2391 rte_mbuf_prefetch_part1(pref_pkt_hdr);
2392 rte_mbuf_prefetch_part2(pref_pkt_hdr);
2395 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2396 sq->ent[sq->head].buff.addr =
2397 rte_mbuf_data_iova_default(mbuf);
2398 sq->ent[sq->head].bpool =
2399 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
2400 mbuf->refcnt > 1)) ? NULL :
2401 mrvl_port_to_bpool_lookup[mbuf->port];
2402 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2405 pp2_ppio_outq_desc_reset(&descs[i]);
2406 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2407 rte_pktmbuf_iova(mbuf));
2408 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2409 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2410 rte_pktmbuf_pkt_len(mbuf));
2412 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2414 * in case unsupported ol_flags were passed,
2415 * do not update the descriptor offload information
2417 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2418 &l3_type, &l4_type, &gen_l3_cksum,
2423 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2425 mbuf->l2_len + mbuf->l3_len,
2426 gen_l3_cksum, gen_l4_cksum);
2430 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
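/* pp2_ppio_send() updates nb_pkts in place to the count actually sent */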
2431 /* number of packets that were not sent */
2432 if (unlikely(num > nb_pkts)) {
2433 for (i = nb_pkts; i < num; i++) {
2434 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2435 MRVL_PP2_TX_SHADOWQ_MASK;
2436 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2438 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2440 sq->size -= num - nb_pkts;
2443 q->bytes_sent += bytes_sent;
2449 * Initialize packet processor.
2452 * 0 on success, negative error value otherwise.
2457 struct pp2_init_params init_params;
2459 memset(&init_params, 0, sizeof(init_params));
2460 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2461 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2462 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
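/* the reserved maps mark hifs/bpools/RSS tables owned by the kernel driver */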
2464 return pp2_init(&init_params);
2468 * Deinitialize packet processor.
2471 * 0 on success, negative error value otherwise.
2474 mrvl_deinit_pp2(void)
2480 * Create private device structure.
2483 * Pointer to the port name passed in the initialization parameters.
2486 * Pointer to the newly allocated private device structure.
2488 static struct mrvl_priv *
2489 mrvl_priv_create(const char *dev_name)
2491 struct pp2_bpool_params bpool_params;
2492 char match[MRVL_MATCH_LEN];
2493 struct mrvl_priv *priv;
2496 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2500 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2501 &priv->pp_id, &priv->ppio_id);
2505 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2506 PP2_BPOOL_NUM_POOLS);
2509 priv->bpool_bit = bpool_bit;
2511 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2513 memset(&bpool_params, 0, sizeof(bpool_params));
2514 bpool_params.match = match;
2515 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2516 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2518 goto out_clear_bpool_bit;
2520 priv->ppio_params.type = PP2_PPIO_T_NIC;
2521 rte_spinlock_init(&priv->lock);
2524 out_clear_bpool_bit:
2525 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2532 * Create device representing Ethernet port.
2535 * Pointer to the port's name.
2538 * 0 on success, negative error value otherwise.
2541 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2543 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2544 struct rte_eth_dev *eth_dev;
2545 struct mrvl_priv *priv;
2548 eth_dev = rte_eth_dev_allocate(name);
2552 priv = mrvl_priv_create(name);
2558 eth_dev->data->mac_addrs =
2559 rte_zmalloc("mac_addrs",
2560 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2561 if (!eth_dev->data->mac_addrs) {
2562 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
2567 memset(&req, 0, sizeof(req));
2568 strcpy(req.ifr_name, name);
2569 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2573 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2574 req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2576 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2577 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2578 eth_dev->data->kdrv = RTE_KDRV_NONE;
2579 eth_dev->data->dev_private = priv;
2580 eth_dev->device = &vdev->device;
2581 eth_dev->dev_ops = &mrvl_ops;
2583 rte_eth_dev_probing_finish(eth_dev);
2586 rte_free(eth_dev->data->mac_addrs);
2588 rte_eth_dev_release_port(eth_dev);
2596 * Cleanup previously created device representing Ethernet port.
2599 * Pointer to the port name.
2602 mrvl_eth_dev_destroy(const char *name)
2604 struct rte_eth_dev *eth_dev;
2605 struct mrvl_priv *priv;
2607 eth_dev = rte_eth_dev_allocated(name);
2611 priv = eth_dev->data->dev_private;
2612 pp2_bpool_deinit(priv->bpool);
2613 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2615 rte_free(eth_dev->data->mac_addrs);
2616 rte_eth_dev_release_port(eth_dev);
2620 * Callback used by rte_kvargs_process() during argument parsing.
2623 * Pointer to the parsed key (unused).
2625 * Pointer to the parsed value.
2627 * Pointer to the extra arguments which contains address of the
2628 * table of pointers to parsed interface names.
2634 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2637 struct mrvl_ifnames *ifnames = extra_args;
2639 ifnames->names[ifnames->idx++] = value;
2645 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2648 mrvl_deinit_hifs(void)
2652 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2654 pp2_hif_deinit(hifs[i]);
2656 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2657 memset(hifs, 0, sizeof(hifs));
2661 * DPDK callback to register the virtual device.
2664 * Pointer to the virtual device.
2667 * 0 on success, negative error value otherwise.
2670 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2672 struct rte_kvargs *kvlist;
2673 struct mrvl_ifnames ifnames;
2675 uint32_t i, ifnum, cfgnum;
2678 params = rte_vdev_device_args(vdev);
2682 kvlist = rte_kvargs_parse(params, valid_args);
2686 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2687 if (ifnum > RTE_DIM(ifnames.names))
2688 goto out_free_kvlist;
2691 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2692 mrvl_get_ifnames, &ifnames);
2696 * The below system initialization should be done only once,
2697 * on the first provided configuration file
2699 if (!mrvl_qos_cfg) {
2700 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2701 MRVL_LOG(INFO, "Parsing config file!");
2703 MRVL_LOG(ERR, "Cannot handle more than one config file!");
2704 goto out_free_kvlist;
2705 } else if (cfgnum == 1) {
2706 rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2707 mrvl_get_qoscfg, &mrvl_qos_cfg);
2714 MRVL_LOG(INFO, "Perform MUSDK initializations");
2716 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
2718 goto out_free_kvlist;
2720 ret = mrvl_init_pp2();
2722 MRVL_LOG(ERR, "Failed to init PP!");
2723 rte_mvep_deinit(MVEP_MOD_T_PP2);
2724 goto out_free_kvlist;
2727 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2728 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
2730 mrvl_lcore_first = RTE_MAX_LCORE;
2731 mrvl_lcore_last = 0;
2734 for (i = 0; i < ifnum; i++) {
2735 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
2736 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2740 mrvl_dev_num += ifnum;
2742 rte_kvargs_free(kvlist);
2747 mrvl_eth_dev_destroy(ifnames.names[i]);
2749 if (mrvl_dev_num == 0) {
2751 rte_mvep_deinit(MVEP_MOD_T_PP2);
2754 rte_kvargs_free(kvlist);
2760 * DPDK callback to remove virtual device.
2763 * Pointer to the removed virtual device.
2766 * 0 on success, negative error value otherwise.
2769 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2774 name = rte_vdev_device_name(vdev);
2778 MRVL_LOG(INFO, "Removing %s", name);
2780 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
2781 char ifname[RTE_ETH_NAME_MAX_LEN];
2783 rte_eth_dev_get_name_by_port(i, ifname);
2784 mrvl_eth_dev_destroy(ifname);
2788 if (mrvl_dev_num == 0) {
2789 MRVL_LOG(INFO, "Perform MUSDK deinit");
2792 rte_mvep_deinit(MVEP_MOD_T_PP2);
2798 static struct rte_vdev_driver pmd_mrvl_drv = {
2799 .probe = rte_pmd_mrvl_probe,
2800 .remove = rte_pmd_mrvl_remove,
2803 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
2804 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
2806 RTE_INIT(mrvl_init_log)
2808 mrvl_logtype = rte_log_register("pmd.net.mvpp2");
2809 if (mrvl_logtype >= 0)
2810 rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);