1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_ethdev_driver.h>
8 #include <rte_kvargs.h>
10 #include <rte_malloc.h>
11 #include <rte_bus_vdev.h>
14 #include <linux/ethtool.h>
15 #include <linux/sockios.h>
17 #include <net/if_arp.h>
18 #include <sys/ioctl.h>
19 #include <sys/socket.h>
21 #include <sys/types.h>
23 #include <rte_mvep_common.h>
24 #include "mrvl_ethdev.h"
26 #include "mrvl_flow.h"
29 /* bitmask with reserved hifs */
30 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
31 /* bitmask with reserved bpools */
32 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
33 /* bitmask with reserved kernel RSS tables */
34 #define MRVL_MUSDK_RSS_RESERVED 0x01
35 /* maximum number of available hifs */
36 #define MRVL_MUSDK_HIFS_MAX 9
39 #define MRVL_MUSDK_PREFETCH_SHIFT 2
41 /* TCAM has 25 entries reserved for uc/mc filter entries */
42 #define MRVL_MAC_ADDRS_MAX 25
43 #define MRVL_MATCH_LEN 16
44 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
45 /* Maximum allowable packet size */
46 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
48 #define MRVL_IFACE_NAME_ARG "iface"
49 #define MRVL_CFG_ARG "cfg"
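/*
 * Illustrative devargs usage (a sketch; the interface and file names are
 * hypothetical): the PMD is instantiated as a vdev, e.g.
 *
 *   testpmd --vdev=eth_mvpp2,iface=eth0,iface=eth2,cfg=/path/qos.conf -- -i
 *
 * where each "iface" names a kernel netdev backed by a ppio and the
 * optional "cfg" points to a QoS configuration file parsed at probe time.
 */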
51 #define MRVL_BURST_SIZE 64
53 #define MRVL_ARP_LENGTH 28
55 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
57 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
58 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
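/*
 * The bpool cookie holds only the low sizeof(pp2_cookie_t) * 8 bits of an
 * mbuf's virtual address; the remaining high bits are assumed identical
 * for all mbufs and are cached once in cookie_addr_high. A sketch of the
 * round trip used throughout this file:
 *
 *   entry.buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;      // store low bits
 *   mbuf = (struct rte_mbuf *)(cookie_addr_high | cookie); // recover mbuf
 */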
60 /** Port Rx offload capabilities */
61 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
62 DEV_RX_OFFLOAD_JUMBO_FRAME | \
63 DEV_RX_OFFLOAD_CHECKSUM)
65 /** Port Tx offload capabilities */
66 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
67 DEV_TX_OFFLOAD_UDP_CKSUM | \
68 DEV_TX_OFFLOAD_TCP_CKSUM)
70 static const char * const valid_args[] = {
76 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
77 static struct pp2_hif *hifs[RTE_MAX_LCORE];
78 static int used_bpools[PP2_NUM_PKT_PROC] = {
79 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
82 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
83 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
84 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
89 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
94 * To support buffer harvesting based on the loopback port, a shadow queue
95 * structure was introduced for buffer information bookkeeping.
97 * Before a packet is sent, the related buffer information (pp2_buff_inf) is
98 * stored in the shadow queue. After the packet is transmitted, the no longer
99 * used packet buffer is released back to its original hardware pool,
100 * on condition that it originated from an interface.
101 * In case it was generated by the application itself, i.e. the mbuf->port
102 * field is 0xff, it is released to the software mempool instead.
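/*
 * The shadow queue is used as a power-of-two ring: head/tail wrap with a
 * mask instead of a modulo. A minimal sketch of the index arithmetic
 * (assuming MRVL_PP2_TX_SHADOWQ_MASK == MRVL_PP2_TX_SHADOWQ_SIZE - 1):
 *
 *   sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;   // produce one
 *   sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK; // consume num
 */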
104 struct mrvl_shadow_txq {
105 int head; /* write index - used when sending buffers */
106 int tail; /* read index - used when releasing buffers */
107 u16 size; /* queue occupied size */
108 u16 num_to_release; /* number of buffers sent, that can be released */
109 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
113 struct mrvl_priv *priv;
114 struct rte_mempool *mp;
123 struct mrvl_priv *priv;
127 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
128 int tx_deferred_start;
131 static int mrvl_lcore_first;
132 static int mrvl_lcore_last;
133 static int mrvl_dev_num;
135 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
136 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
137 struct pp2_hif *hif, unsigned int core_id,
138 struct mrvl_shadow_txq *sq, int qid, int force);
140 #define MRVL_XSTATS_TBL_ENTRY(name) { \
141 #name, offsetof(struct pp2_ppio_statistics, name), \
142 sizeof(((struct pp2_ppio_statistics *)0)->name) \
145 /* Table with xstats data */
150 } mrvl_xstats_tbl[] = {
151 MRVL_XSTATS_TBL_ENTRY(rx_bytes),
152 MRVL_XSTATS_TBL_ENTRY(rx_packets),
153 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
154 MRVL_XSTATS_TBL_ENTRY(rx_errors),
155 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
156 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
157 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
158 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
159 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
160 MRVL_XSTATS_TBL_ENTRY(tx_bytes),
161 MRVL_XSTATS_TBL_ENTRY(tx_packets),
162 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
163 MRVL_XSTATS_TBL_ENTRY(tx_errors)
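/*
 * For reference, MRVL_XSTATS_TBL_ENTRY(rx_bytes) above expands to roughly:
 *
 *   { "rx_bytes",
 *     offsetof(struct pp2_ppio_statistics, rx_bytes),
 *     sizeof(((struct pp2_ppio_statistics *)0)->rx_bytes) }
 *
 * i.e. a counter name plus the byte offset and width that mrvl_xstats_get()
 * uses to read the value straight out of struct pp2_ppio_statistics.
 */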
167 mrvl_get_bpool_size(int pp2_id, int pool_id)
172 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
173 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
179 mrvl_reserve_bit(int *bitmap, int max)
181 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
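/*
 * Worked example (assuming the elided remainder marks bit n as used and
 * fails once n >= max): used_hifs starts as
 * MRVL_MUSDK_HIFS_RESERVED == 0x0F, so with a 32-bit int
 * __builtin_clz(0x0F) == 28 and n == 32 - 28 == 4; the first call hands
 * out hif 4 and the bitmap becomes 0x1F, the next call returns 5, and so
 * on up to MRVL_MUSDK_HIFS_MAX - 1.
 */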
192 mrvl_init_hif(int core_id)
194 struct pp2_hif_params params;
195 char match[MRVL_MATCH_LEN];
198 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
200 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
204 snprintf(match, sizeof(match), "hif-%d", ret);
205 memset(&params, 0, sizeof(params));
206 params.match = match;
207 params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
208 ret = pp2_hif_init(&params, &hifs[core_id]);
210 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
217 static inline struct pp2_hif*
218 mrvl_get_hif(struct mrvl_priv *priv, int core_id)
222 if (likely(hifs[core_id] != NULL))
223 return hifs[core_id];
225 rte_spinlock_lock(&priv->lock);
227 ret = mrvl_init_hif(core_id);
229 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
233 if (core_id < mrvl_lcore_first)
234 mrvl_lcore_first = core_id;
236 if (core_id > mrvl_lcore_last)
237 mrvl_lcore_last = core_id;
239 rte_spinlock_unlock(&priv->lock);
241 return hifs[core_id];
245 * Configure RSS based on the DPDK RSS configuration.
248 * Pointer to private structure.
250 * Pointer to RSS configuration.
253 * 0 on success, negative error value otherwise.
256 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
258 if (rss_conf->rss_key)
259 MRVL_LOG(WARNING, "Changing hash key is not supported");
261 if (rss_conf->rss_hf == 0) {
262 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
263 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
264 priv->ppio_params.inqs_params.hash_type =
265 PP2_PPIO_HASH_T_2_TUPLE;
266 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
267 priv->ppio_params.inqs_params.hash_type =
268 PP2_PPIO_HASH_T_5_TUPLE;
269 priv->rss_hf_tcp = 1;
270 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
271 priv->ppio_params.inqs_params.hash_type =
272 PP2_PPIO_HASH_T_5_TUPLE;
273 priv->rss_hf_tcp = 0;
282 * Ethernet device configuration.
284 * Prepare the driver for a given number of TX and RX queues and
288 * Pointer to Ethernet device structure.
291 * 0 on success, negative error value otherwise.
294 mrvl_dev_configure(struct rte_eth_dev *dev)
296 struct mrvl_priv *priv = dev->data->dev_private;
300 MRVL_LOG(INFO, "Device reconfiguration is not supported");
304 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
305 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
306 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
307 dev->data->dev_conf.rxmode.mq_mode);
311 if (dev->data->dev_conf.rxmode.split_hdr_size) {
312 MRVL_LOG(INFO, "Split headers not supported");
316 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
317 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
318 ETHER_HDR_LEN - ETHER_CRC_LEN;
320 ret = mrvl_configure_rxqs(priv, dev->data->port_id,
321 dev->data->nb_rx_queues);
325 ret = mrvl_configure_txqs(priv, dev->data->port_id,
326 dev->data->nb_tx_queues);
330 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
331 priv->ppio_params.maintain_stats = 1;
332 priv->nb_rx_queues = dev->data->nb_rx_queues;
334 if (dev->data->nb_rx_queues == 1 &&
335 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
336 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
337 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
342 return mrvl_configure_rss(priv,
343 &dev->data->dev_conf.rx_adv_conf.rss_conf);
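/*
 * A minimal application-side sketch of a configuration this callback
 * accepts (port_id/nb_rxq/nb_txq are illustrative):
 *
 *   struct rte_eth_conf conf = {0};
 *
 *   conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 *   conf.rx_adv_conf.rss_conf.rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
 *   ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 *
 * With more than one rx queue this reaches mrvl_configure_rss() and
 * selects PP2_PPIO_HASH_T_5_TUPLE hashing with rss_hf_tcp == 0.
 */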
347 * DPDK callback to change the MTU.
349 * Setting the MTU affects hardware MRU (packets larger than the MRU
353 * Pointer to Ethernet device structure.
358 * 0 on success, negative error value otherwise.
361 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
363 struct mrvl_priv *priv = dev->data->dev_private;
364 /* extra MV_MH_SIZE bytes are required for Marvell tag */
365 uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
368 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
374 ret = pp2_ppio_set_mru(priv->ppio, mru);
378 return pp2_ppio_set_mtu(priv->ppio, mtu);
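/*
 * Example of the mtu -> mru arithmetic above, assuming the usual 2-byte
 * Marvell header (MV_MH_SIZE): for the default DPDK mtu of 1500,
 * mru = 1500 + 2 + ETHER_HDR_LEN (14) + ETHER_CRC_LEN (4) = 1520 bytes,
 * which is what the hardware is told to accept on rx.
 */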
382 * DPDK callback to bring the link up.
385 * Pointer to Ethernet device structure.
388 * 0 on success, negative error value otherwise.
391 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
393 struct mrvl_priv *priv = dev->data->dev_private;
399 ret = pp2_ppio_enable(priv->ppio);
404 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
405 * as pp2_ppio_enable() changes port->t_mode from default 0 to
406 * PP2_TRAFFIC_INGRESS_EGRESS.
408 * Set mtu to default DPDK value here.
410 ret = mrvl_mtu_set(dev, dev->data->mtu);
412 pp2_ppio_disable(priv->ppio);
418 * DPDK callback to bring the link down.
421 * Pointer to Ethernet device structure.
424 * 0 on success, negative error value otherwise.
427 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
429 struct mrvl_priv *priv = dev->data->dev_private;
434 return pp2_ppio_disable(priv->ppio);
438 * DPDK callback to start tx queue.
441 * Pointer to Ethernet device structure.
443 * Transmit queue index.
446 * 0 on success, negative error value otherwise.
449 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
451 struct mrvl_priv *priv = dev->data->dev_private;
457 /* passing 1 enables the given tx queue */
458 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
460 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
464 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
470 * DPDK callback to stop tx queue.
473 * Pointer to Ethernet device structure.
475 * Transmit queue index.
478 * 0 on success, negative error value otherwise.
481 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
483 struct mrvl_priv *priv = dev->data->dev_private;
489 /* passing 0 disables the given tx queue */
490 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
492 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
496 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
502 * DPDK callback to start the device.
505 * Pointer to Ethernet device structure.
508 * 0 on success, negative errno value on failure.
511 mrvl_dev_start(struct rte_eth_dev *dev)
513 struct mrvl_priv *priv = dev->data->dev_private;
514 char match[MRVL_MATCH_LEN];
515 int ret = 0, i, def_init_size;
518 return mrvl_dev_set_link_up(dev);
520 snprintf(match, sizeof(match), "ppio-%d:%d",
521 priv->pp_id, priv->ppio_id);
522 priv->ppio_params.match = match;
525 * Calculate the minimum bpool size for the refill feature as follows:
526 * two default burst sizes multiplied by the number of rx queues.
527 * If the bpool size drops below this value, new buffers will
528 * be added to the pool.
530 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
532 /* In case the initial bpool size configured during queue setup is
533 * smaller than the minimum size, add more buffers.
535 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
536 if (priv->bpool_init_size < def_init_size) {
537 int buffs_to_add = def_init_size - priv->bpool_init_size;
539 priv->bpool_init_size += buffs_to_add;
540 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
542 MRVL_LOG(ERR, "Failed to add buffers to bpool");
546 * Calculate the maximum bpool size for the refill feature as follows:
547 * the maximum number of descriptors in an rx queue multiplied by the
548 * number of rx queues, plus the minimum bpool size.
549 * In case the bpool size exceeds this value, superfluous buffers
552 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
553 priv->bpool_min_size;
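/*
 * Putting both bounds together for, say, 4 rx queues:
 * bpool_min_size = 4 * MRVL_BURST_SIZE (64) * 2 = 512 buffers and
 * bpool_max_size = 4 * MRVL_PP2_RXD_MAX + 512. The rx path refills the
 * pool when it shrinks below the former and trims it when it grows
 * beyond the latter.
 */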
555 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
557 MRVL_LOG(ERR, "Failed to init ppio");
562 * In case there are some stale uc/mc mac addresses, flush them
563 * here. It cannot be done during mrvl_dev_close() as port information
564 * is already gone at that point (due to pp2_ppio_deinit() in
567 if (!priv->uc_mc_flushed) {
568 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
571 "Failed to flush uc/mc filter list");
574 priv->uc_mc_flushed = 1;
577 if (!priv->vlan_flushed) {
578 ret = pp2_ppio_flush_vlan(priv->ppio);
580 MRVL_LOG(ERR, "Failed to flush vlan list");
583 * once pp2_ppio_flush_vlan() is supported jump to out
587 priv->vlan_flushed = 1;
590 /* For default QoS config, don't start classifier. */
592 ret = mrvl_start_qos_mapping(priv);
594 MRVL_LOG(ERR, "Failed to setup QoS mapping");
599 ret = mrvl_dev_set_link_up(dev);
601 MRVL_LOG(ERR, "Failed to set link up");
605 /* start tx queues */
606 for (i = 0; i < dev->data->nb_tx_queues; i++) {
607 struct mrvl_txq *txq = dev->data->tx_queues[i];
609 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
611 if (!txq->tx_deferred_start)
615 * All txqs are started by default. Stop them
616 * so that tx_deferred_start works as expected.
618 ret = mrvl_tx_queue_stop(dev, i);
628 MRVL_LOG(ERR, "Failed to start device");
629 pp2_ppio_deinit(priv->ppio);
634 * Flush receive queues.
637 * Pointer to Ethernet device structure.
640 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
644 MRVL_LOG(INFO, "Flushing rx queues");
645 for (i = 0; i < dev->data->nb_rx_queues; i++) {
649 struct mrvl_rxq *q = dev->data->rx_queues[i];
650 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
652 num = MRVL_PP2_RXD_MAX;
653 ret = pp2_ppio_recv(q->priv->ppio,
654 q->priv->rxq_map[q->queue_id].tc,
655 q->priv->rxq_map[q->queue_id].inq,
656 descs, (uint16_t *)&num);
657 } while (ret == 0 && num);
662 * Flush transmit shadow queues.
665 * Pointer to Ethernet device structure.
668 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
671 struct mrvl_txq *txq;
673 MRVL_LOG(INFO, "Flushing tx shadow queues");
674 for (i = 0; i < dev->data->nb_tx_queues; i++) {
675 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
677 for (j = 0; j < RTE_MAX_LCORE; j++) {
678 struct mrvl_shadow_txq *sq;
683 sq = &txq->shadow_txqs[j];
684 mrvl_free_sent_buffers(txq->priv->ppio,
685 hifs[j], j, sq, txq->queue_id, 1);
686 while (sq->tail != sq->head) {
687 uint64_t addr = cookie_addr_high |
688 sq->ent[sq->tail].buff.cookie;
690 (struct rte_mbuf *)addr);
691 sq->tail = (sq->tail + 1) &
692 MRVL_PP2_TX_SHADOWQ_MASK;
694 memset(sq, 0, sizeof(*sq));
700 * Flush hardware bpool (buffer-pool).
703 * Pointer to Ethernet device structure.
706 mrvl_flush_bpool(struct rte_eth_dev *dev)
708 struct mrvl_priv *priv = dev->data->dev_private;
712 unsigned int core_id = rte_lcore_id();
714 if (core_id == LCORE_ID_ANY)
717 hif = mrvl_get_hif(priv, core_id);
719 ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
721 MRVL_LOG(ERR, "Failed to get bpool buffers number");
726 struct pp2_buff_inf inf;
729 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
733 addr = cookie_addr_high | inf.cookie;
734 rte_pktmbuf_free((struct rte_mbuf *)addr);
739 * DPDK callback to stop the device.
742 * Pointer to Ethernet device structure.
745 mrvl_dev_stop(struct rte_eth_dev *dev)
747 mrvl_dev_set_link_down(dev);
751 * DPDK callback to close the device.
754 * Pointer to Ethernet device structure.
757 mrvl_dev_close(struct rte_eth_dev *dev)
759 struct mrvl_priv *priv = dev->data->dev_private;
762 mrvl_flush_rx_queues(dev);
763 mrvl_flush_tx_shadow_queues(dev);
764 mrvl_flow_deinit(dev);
765 mrvl_mtr_deinit(dev);
767 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
768 struct pp2_ppio_tc_params *tc_params =
769 &priv->ppio_params.inqs_params.tcs_params[i];
771 if (tc_params->inqs_params) {
772 rte_free(tc_params->inqs_params);
773 tc_params->inqs_params = NULL;
778 pp2_cls_tbl_deinit(priv->cls_tbl);
779 priv->cls_tbl = NULL;
783 pp2_cls_qos_tbl_deinit(priv->qos_tbl);
784 priv->qos_tbl = NULL;
787 mrvl_flush_bpool(dev);
790 pp2_ppio_deinit(priv->ppio);
794 /* policer must be released after ppio deinitialization */
795 if (priv->default_policer) {
796 pp2_cls_plcr_deinit(priv->default_policer);
797 priv->default_policer = NULL;
802 * DPDK callback to retrieve physical link information.
805 * Pointer to Ethernet device structure.
806 * @param wait_to_complete
807 * Wait for request completion (ignored).
810 * 0 on success, negative error value otherwise.
813 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
817 * once MUSDK provides necessary API use it here
819 struct mrvl_priv *priv = dev->data->dev_private;
820 struct ethtool_cmd edata;
822 int ret, fd, link_up;
827 edata.cmd = ETHTOOL_GSET;
829 snprintf(req.ifr_name, sizeof(req.ifr_name), "%s", dev->data->name);
830 req.ifr_data = (void *)&edata;
832 fd = socket(AF_INET, SOCK_DGRAM, 0);
836 ret = ioctl(fd, SIOCETHTOOL, &req);
844 switch (ethtool_cmd_speed(&edata)) {
846 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
849 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
852 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
855 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
858 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
861 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
862 ETH_LINK_HALF_DUPLEX;
863 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
865 pp2_ppio_get_link_state(priv->ppio, &link_up);
866 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
872 * DPDK callback to enable promiscuous mode.
875 * Pointer to Ethernet device structure.
878 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
880 struct mrvl_priv *priv = dev->data->dev_private;
889 ret = pp2_ppio_set_promisc(priv->ppio, 1);
891 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
895 * DPDK callback to enable allmulti mode.
898 * Pointer to Ethernet device structure.
901 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
903 struct mrvl_priv *priv = dev->data->dev_private;
912 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
914 MRVL_LOG(ERR, "Failed to enable all-multicast mode");
918 * DPDK callback to disable promiscuous mode.
921 * Pointer to Ethernet device structure.
924 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
926 struct mrvl_priv *priv = dev->data->dev_private;
932 ret = pp2_ppio_set_promisc(priv->ppio, 0);
934 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
938 * DPDK callback to disable allmulticast mode.
941 * Pointer to Ethernet device structure.
944 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
946 struct mrvl_priv *priv = dev->data->dev_private;
952 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
954 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
958 * DPDK callback to remove a MAC address.
961 * Pointer to Ethernet device structure.
966 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
968 struct mrvl_priv *priv = dev->data->dev_private;
969 char buf[ETHER_ADDR_FMT_SIZE];
978 ret = pp2_ppio_remove_mac_addr(priv->ppio,
979 dev->data->mac_addrs[index].addr_bytes);
981 ether_format_addr(buf, sizeof(buf),
982 &dev->data->mac_addrs[index]);
983 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
988 * DPDK callback to add a MAC address.
991 * Pointer to Ethernet device structure.
993 * MAC address to register.
997 * VMDq pool index to associate address with (unused).
1000 * 0 on success, negative error value otherwise.
1003 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1004 uint32_t index, uint32_t vmdq __rte_unused)
1006 struct mrvl_priv *priv = dev->data->dev_private;
1007 char buf[ETHER_ADDR_FMT_SIZE];
1014 /* For setting index 0, mrvl_mac_addr_set() should be used. */
1021 * The maximum number of uc addresses can be tuned via the mvpp2x kernel
1022 * module parameter uc_filter_max. The maximum number of mc addresses is
1023 * then MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
1026 * If more than uc_filter_max uc addresses are added to the filter list
1027 * then the NIC will switch to promiscuous mode automatically.
1029 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are added
1030 * to the filter list then the NIC will switch to all-multicast mode
1033 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1035 ether_format_addr(buf, sizeof(buf), mac_addr);
1036 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1044 * DPDK callback to set the primary MAC address.
1047 * Pointer to Ethernet device structure.
1049 * MAC address to register.
1052 * 0 on success, negative error value otherwise.
1055 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1057 struct mrvl_priv *priv = dev->data->dev_private;
1066 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1068 char buf[ETHER_ADDR_FMT_SIZE];
1069 ether_format_addr(buf, sizeof(buf), mac_addr);
1070 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1077 * DPDK callback to get device statistics.
1080 * Pointer to Ethernet device structure.
1082 * Stats structure output buffer.
1085 * 0 on success, negative error value otherwise.
1088 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1090 struct mrvl_priv *priv = dev->data->dev_private;
1091 struct pp2_ppio_statistics ppio_stats;
1092 uint64_t drop_mac = 0;
1093 unsigned int i, idx, ret;
1098 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1099 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1100 struct pp2_ppio_inq_statistics rx_stats;
1105 idx = rxq->queue_id;
1106 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1108 "rx queue %d stats out of range (0 - %d)",
1109 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1113 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1114 priv->rxq_map[idx].tc,
1115 priv->rxq_map[idx].inq,
1117 if (unlikely(ret)) {
1119 "Failed to update rx queue %d stats", idx);
1123 stats->q_ibytes[idx] = rxq->bytes_recv;
1124 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1125 stats->q_errors[idx] = rx_stats.drop_early +
1126 rx_stats.drop_fullq +
1129 stats->ibytes += rxq->bytes_recv;
1130 drop_mac += rxq->drop_mac;
1133 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1134 struct mrvl_txq *txq = dev->data->tx_queues[i];
1135 struct pp2_ppio_outq_statistics tx_stats;
1140 idx = txq->queue_id;
1141 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1143 "tx queue %d stats out of range (0 - %d)",
1144 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1147 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1149 if (unlikely(ret)) {
1151 "Failed to update tx queue %d stats", idx);
1155 stats->q_opackets[idx] = tx_stats.deq_desc;
1156 stats->q_obytes[idx] = txq->bytes_sent;
1157 stats->obytes += txq->bytes_sent;
1160 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1161 if (unlikely(ret)) {
1162 MRVL_LOG(ERR, "Failed to update port statistics");
1166 stats->ipackets += ppio_stats.rx_packets - drop_mac;
1167 stats->opackets += ppio_stats.tx_packets;
1168 stats->imissed += ppio_stats.rx_fullq_dropped +
1169 ppio_stats.rx_bm_dropped +
1170 ppio_stats.rx_early_dropped +
1171 ppio_stats.rx_fifo_dropped +
1172 ppio_stats.rx_cls_dropped;
1173 stats->ierrors = drop_mac;
1179 * DPDK callback to clear device statistics.
1182 * Pointer to Ethernet device structure.
1185 mrvl_stats_reset(struct rte_eth_dev *dev)
1187 struct mrvl_priv *priv = dev->data->dev_private;
1193 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1194 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1196 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1197 priv->rxq_map[i].inq, NULL, 1);
1198 rxq->bytes_recv = 0;
1202 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1203 struct mrvl_txq *txq = dev->data->tx_queues[i];
1205 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1206 txq->bytes_sent = 0;
1209 pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1213 * DPDK callback to get extended statistics.
1216 * Pointer to Ethernet device structure.
1218 * Pointer to xstats table.
1220 * Number of entries in xstats table.
1222 * Negative value on error, number of read xstats otherwise.
1225 mrvl_xstats_get(struct rte_eth_dev *dev,
1226 struct rte_eth_xstat *stats, unsigned int n)
1228 struct mrvl_priv *priv = dev->data->dev_private;
1229 struct pp2_ppio_statistics ppio_stats;
1235 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1236 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1239 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1240 val = *(uint32_t *)((uint8_t *)&ppio_stats +
1241 mrvl_xstats_tbl[i].offset);
1242 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1243 val = *(uint64_t *)((uint8_t *)&ppio_stats +
1244 mrvl_xstats_tbl[i].offset);
1249 stats[i].value = val;
1256 * DPDK callback to reset extended statistics.
1259 * Pointer to Ethernet device structure.
1262 mrvl_xstats_reset(struct rte_eth_dev *dev)
1264 mrvl_stats_reset(dev);
1268 * DPDK callback to get extended statistics names.
1270 * @param dev (unused)
1271 * Pointer to Ethernet device structure.
1272 * @param xstats_names
1273 * Pointer to xstats names table.
1275 * Size of the xstats names table.
1277 * Number of read names.
1280 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1281 struct rte_eth_xstat_name *xstats_names,
1287 return RTE_DIM(mrvl_xstats_tbl);
1289 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
1290 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
1291 mrvl_xstats_tbl[i].name);
1297 * DPDK callback to get information about the device.
1300 * Pointer to Ethernet device structure (unused).
1302 * Info structure output buffer.
1305 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
1306 struct rte_eth_dev_info *info)
1308 info->speed_capa = ETH_LINK_SPEED_10M |
1309 ETH_LINK_SPEED_100M |
1313 info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1314 info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1315 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1317 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1318 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1319 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1321 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1322 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1323 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1325 info->rx_offload_capa = MRVL_RX_OFFLOADS;
1326 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1328 info->tx_offload_capa = MRVL_TX_OFFLOADS;
1329 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1331 info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1332 ETH_RSS_NONFRAG_IPV4_TCP |
1333 ETH_RSS_NONFRAG_IPV4_UDP;
1335 /* By default packets are dropped if no descriptors are available */
1336 info->default_rxconf.rx_drop_en = 1;
1338 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1342 * Return supported packet types.
1345 * Pointer to Ethernet device structure (unused).
1348 * Const pointer to the table with supported packet types.
1350 static const uint32_t *
1351 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1353 static const uint32_t ptypes[] = {
1355 RTE_PTYPE_L2_ETHER_VLAN,
1356 RTE_PTYPE_L2_ETHER_QINQ,
1358 RTE_PTYPE_L3_IPV4_EXT,
1359 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1361 RTE_PTYPE_L3_IPV6_EXT,
1362 RTE_PTYPE_L2_ETHER_ARP,
1371 * DPDK callback to get information about specific receive queue.
1374 * Pointer to Ethernet device structure.
1375 * @param rx_queue_id
1376 * Receive queue index.
1378 * Receive queue information structure.
1380 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1381 struct rte_eth_rxq_info *qinfo)
1383 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1384 struct mrvl_priv *priv = dev->data->dev_private;
1385 int inq = priv->rxq_map[rx_queue_id].inq;
1386 int tc = priv->rxq_map[rx_queue_id].tc;
1387 struct pp2_ppio_tc_params *tc_params =
1388 &priv->ppio_params.inqs_params.tcs_params[tc];
1391 qinfo->nb_desc = tc_params->inqs_params[inq].size;
1395 * DPDK callback to get information about specific transmit queue.
1398 * Pointer to Ethernet device structure.
1399 * @param tx_queue_id
1400 * Transmit queue index.
1402 * Transmit queue information structure.
1404 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1405 struct rte_eth_txq_info *qinfo)
1407 struct mrvl_priv *priv = dev->data->dev_private;
1408 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1411 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1412 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1416 * DPDK callback to configure a VLAN filter.
1419 * Pointer to Ethernet device structure.
1421 * VLAN ID to filter.
1426 * 0 on success, negative error value otherwise.
1429 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1431 struct mrvl_priv *priv = dev->data->dev_private;
1439 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1440 pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1444 * Release buffers to hardware bpool (buffer-pool)
1447 * Receive queue pointer.
1449 * Number of buffers to release to bpool.
1452 * 0 on success, negative error value otherwise.
1455 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1457 struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
1458 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
1460 unsigned int core_id;
1461 struct pp2_hif *hif;
1462 struct pp2_bpool *bpool;
1464 core_id = rte_lcore_id();
1465 if (core_id == LCORE_ID_ANY)
1468 hif = mrvl_get_hif(rxq->priv, core_id);
1472 bpool = rxq->priv->bpool;
1474 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1478 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1480 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1482 for (i = 0; i < num; i++) {
1483 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1484 != cookie_addr_high) {
1486 "mbuf virtual addr high 0x%lx out of range",
1487 (uint64_t)mbufs[i] >> 32);
1491 entries[i].buff.addr =
1492 rte_mbuf_data_iova_default(mbufs[i]);
1493 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
1494 entries[i].bpool = bpool;
1497 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1498 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1505 for (; i < num; i++)
1506 rte_pktmbuf_free(mbufs[i]);
1512 * DPDK callback to configure the receive queue.
1515 * Pointer to Ethernet device structure.
1519 * Number of descriptors to configure in queue.
1521 * NUMA socket on which memory must be allocated.
1523 * Thresholds parameters.
1525 * Memory pool for buffer allocations.
1528 * 0 on success, negative error value otherwise.
1531 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1532 unsigned int socket,
1533 const struct rte_eth_rxconf *conf,
1534 struct rte_mempool *mp)
1536 struct mrvl_priv *priv = dev->data->dev_private;
1537 struct mrvl_rxq *rxq;
1539 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1543 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1545 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1547 * Unknown TC mapping; the queue cannot be mapped to a TC/in-queue pair.
1549 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1550 idx, priv->ppio_id);
1554 min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
1555 MRVL_PKT_EFFEC_OFFS;
1556 if (min_size < max_rx_pkt_len) {
1558 "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.",
1559 max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
1560 MRVL_PKT_EFFEC_OFFS,
1565 if (dev->data->rx_queues[idx]) {
1566 rte_free(dev->data->rx_queues[idx]);
1567 dev->data->rx_queues[idx] = NULL;
1570 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
1576 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
1577 rxq->queue_id = idx;
1578 rxq->port_id = dev->data->port_id;
1579 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
1581 tc = priv->rxq_map[rxq->queue_id].tc;
1582 inq = priv->rxq_map[rxq->queue_id].inq;
1583 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
1586 ret = mrvl_fill_bpool(rxq, desc);
1592 priv->bpool_init_size += desc;
1594 dev->data->rx_queues[idx] = rxq;
1600 * DPDK callback to release the receive queue.
1603 * Generic receive queue pointer.
1606 mrvl_rx_queue_release(void *rxq)
1608 struct mrvl_rxq *q = rxq;
1609 struct pp2_ppio_tc_params *tc_params;
1610 int i, num, tc, inq;
1611 struct pp2_hif *hif;
1612 unsigned int core_id = rte_lcore_id();
1614 if (core_id == LCORE_ID_ANY)
1620 hif = mrvl_get_hif(q->priv, core_id);
1625 tc = q->priv->rxq_map[q->queue_id].tc;
1626 inq = q->priv->rxq_map[q->queue_id].inq;
1627 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
1628 num = tc_params->inqs_params[inq].size;
1629 for (i = 0; i < num; i++) {
1630 struct pp2_buff_inf inf;
1633 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
1634 addr = cookie_addr_high | inf.cookie;
1635 rte_pktmbuf_free((struct rte_mbuf *)addr);
1642 * DPDK callback to configure the transmit queue.
1645 * Pointer to Ethernet device structure.
1647 * Transmit queue index.
1649 * Number of descriptors to configure in the queue.
1651 * NUMA socket on which memory must be allocated.
1653 * Tx queue configuration parameters.
1656 * 0 on success, negative error value otherwise.
1659 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1660 unsigned int socket,
1661 const struct rte_eth_txconf *conf)
1663 struct mrvl_priv *priv = dev->data->dev_private;
1664 struct mrvl_txq *txq;
1666 if (dev->data->tx_queues[idx]) {
1667 rte_free(dev->data->tx_queues[idx]);
1668 dev->data->tx_queues[idx] = NULL;
1671 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
1676 txq->queue_id = idx;
1677 txq->port_id = dev->data->port_id;
1678 txq->tx_deferred_start = conf->tx_deferred_start;
1679 dev->data->tx_queues[idx] = txq;
1681 priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
1687 * DPDK callback to release the transmit queue.
1690 * Generic transmit queue pointer.
1693 mrvl_tx_queue_release(void *txq)
1695 struct mrvl_txq *q = txq;
1704 * DPDK callback to get flow control configuration.
1707 * Pointer to Ethernet device structure.
1709 * Pointer to the flow control configuration.
1712 * 0 on success, negative error value otherwise.
1715 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1717 struct mrvl_priv *priv = dev->data->dev_private;
1723 ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
1725 MRVL_LOG(ERR, "Failed to read rx pause state");
1729 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
1735 * DPDK callback to set flow control configuration.
1738 * Pointer to Ethernet device structure.
1740 * Pointer to the flow control configuration.
1743 * 0 on success, negative error value otherwise.
1746 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1748 struct mrvl_priv *priv = dev->data->dev_private;
1753 if (fc_conf->high_water ||
1754 fc_conf->low_water ||
1755 fc_conf->pause_time ||
1756 fc_conf->mac_ctrl_frame_fwd ||
1758 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
1763 if (fc_conf->mode == RTE_FC_NONE ||
1764 fc_conf->mode == RTE_FC_RX_PAUSE) {
1767 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
1768 ret = pp2_ppio_set_rx_pause(priv->ppio, en);
1771 "Failed to change flowctrl on RX side");
1780 * Update RSS hash configuration
1783 * Pointer to Ethernet device structure.
1785 * Pointer to RSS configuration.
1788 * 0 on success, negative error value otherwise.
1791 mrvl_rss_hash_update(struct rte_eth_dev *dev,
1792 struct rte_eth_rss_conf *rss_conf)
1794 struct mrvl_priv *priv = dev->data->dev_private;
1799 return mrvl_configure_rss(priv, rss_conf);
1803 * DPDK callback to get RSS hash configuration.
1806 * Pointer to Ethernet device structure.
1808 * Pointer to RSS configuration.
1814 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
1815 struct rte_eth_rss_conf *rss_conf)
1817 struct mrvl_priv *priv = dev->data->dev_private;
1818 enum pp2_ppio_hash_type hash_type =
1819 priv->ppio_params.inqs_params.hash_type;
1821 rss_conf->rss_key = NULL;
1823 if (hash_type == PP2_PPIO_HASH_T_NONE)
1824 rss_conf->rss_hf = 0;
1825 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
1826 rss_conf->rss_hf = ETH_RSS_IPV4;
1827 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
1828 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
1829 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
1830 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
1836 * DPDK callback to get rte_flow callbacks.
1839 * Pointer to the device structure.
1843 * Flow filter operation.
1845 * Pointer to pass the flow ops.
1848 * 0 on success, negative error value otherwise.
1851 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
1852 enum rte_filter_type filter_type,
1853 enum rte_filter_op filter_op, void *arg)
1855 switch (filter_type) {
1856 case RTE_ETH_FILTER_GENERIC:
1857 if (filter_op != RTE_ETH_FILTER_GET)
1859 *(const void **)arg = &mrvl_flow_ops;
1862 MRVL_LOG(WARNING, "Filter type (%d) not supported",
1869 * DPDK callback to get rte_mtr callbacks.
1872 * Pointer to the device structure.
1874 * Pointer to pass the mtr ops.
1880 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1882 *(const void **)ops = &mrvl_mtr_ops;
1887 static const struct eth_dev_ops mrvl_ops = {
1888 .dev_configure = mrvl_dev_configure,
1889 .dev_start = mrvl_dev_start,
1890 .dev_stop = mrvl_dev_stop,
1891 .dev_set_link_up = mrvl_dev_set_link_up,
1892 .dev_set_link_down = mrvl_dev_set_link_down,
1893 .dev_close = mrvl_dev_close,
1894 .link_update = mrvl_link_update,
1895 .promiscuous_enable = mrvl_promiscuous_enable,
1896 .allmulticast_enable = mrvl_allmulticast_enable,
1897 .promiscuous_disable = mrvl_promiscuous_disable,
1898 .allmulticast_disable = mrvl_allmulticast_disable,
1899 .mac_addr_remove = mrvl_mac_addr_remove,
1900 .mac_addr_add = mrvl_mac_addr_add,
1901 .mac_addr_set = mrvl_mac_addr_set,
1902 .mtu_set = mrvl_mtu_set,
1903 .stats_get = mrvl_stats_get,
1904 .stats_reset = mrvl_stats_reset,
1905 .xstats_get = mrvl_xstats_get,
1906 .xstats_reset = mrvl_xstats_reset,
1907 .xstats_get_names = mrvl_xstats_get_names,
1908 .dev_infos_get = mrvl_dev_infos_get,
1909 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
1910 .rxq_info_get = mrvl_rxq_info_get,
1911 .txq_info_get = mrvl_txq_info_get,
1912 .vlan_filter_set = mrvl_vlan_filter_set,
1913 .tx_queue_start = mrvl_tx_queue_start,
1914 .tx_queue_stop = mrvl_tx_queue_stop,
1915 .rx_queue_setup = mrvl_rx_queue_setup,
1916 .rx_queue_release = mrvl_rx_queue_release,
1917 .tx_queue_setup = mrvl_tx_queue_setup,
1918 .tx_queue_release = mrvl_tx_queue_release,
1919 .flow_ctrl_get = mrvl_flow_ctrl_get,
1920 .flow_ctrl_set = mrvl_flow_ctrl_set,
1921 .rss_hash_update = mrvl_rss_hash_update,
1922 .rss_hash_conf_get = mrvl_rss_hash_conf_get,
1923 .filter_ctrl = mrvl_eth_filter_ctrl,
1924 .mtr_ops_get = mrvl_mtr_ops_get,
1928 * Return packet type information and l3/l4 offsets.
1931 * Pointer to the received packet descriptor.
1938 * Packet type information.
1940 static inline uint64_t
1941 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
1942 uint8_t *l3_offset, uint8_t *l4_offset)
1944 enum pp2_inq_l3_type l3_type;
1945 enum pp2_inq_l4_type l4_type;
1946 enum pp2_inq_vlan_tag vlan_tag;
1947 uint64_t packet_type;
1949 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
1950 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
1951 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
1953 packet_type = RTE_PTYPE_L2_ETHER;
1956 case PP2_INQ_VLAN_TAG_SINGLE:
1957 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
1959 case PP2_INQ_VLAN_TAG_DOUBLE:
1960 case PP2_INQ_VLAN_TAG_TRIPLE:
1961 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
1968 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
1969 packet_type |= RTE_PTYPE_L3_IPV4;
1971 case PP2_INQ_L3_TYPE_IPV4_OK:
1972 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
1974 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
1975 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
1977 case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
1978 packet_type |= RTE_PTYPE_L3_IPV6;
1980 case PP2_INQ_L3_TYPE_IPV6_EXT:
1981 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
1983 case PP2_INQ_L3_TYPE_ARP:
1984 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
1986 * In case of ARP the l4_offset is set to a wrong value.
1987 * Set it to a proper one so that later on mbuf->l3_len can be
1988 * calculated by subtracting l3_offset from l4_offset.
1990 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
1993 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
1998 case PP2_INQ_L4_TYPE_TCP:
1999 packet_type |= RTE_PTYPE_L4_TCP;
2001 case PP2_INQ_L4_TYPE_UDP:
2002 packet_type |= RTE_PTYPE_L4_UDP;
2005 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
2013 * Get offload information from the received packet descriptor.
2016 * Pointer to the received packet descriptor.
2019 * Mbuf offload flags.
2021 static inline uint64_t
2022 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2025 enum pp2_inq_desc_status status;
2027 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2028 if (unlikely(status != PP2_DESC_ERR_OK))
2029 flags = PKT_RX_IP_CKSUM_BAD;
2031 flags = PKT_RX_IP_CKSUM_GOOD;
2033 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2034 if (unlikely(status != PP2_DESC_ERR_OK))
2035 flags |= PKT_RX_L4_CKSUM_BAD;
2037 flags |= PKT_RX_L4_CKSUM_GOOD;
2043 * DPDK callback for receive.
2046 * Generic pointer to the receive queue.
2048 * Array to store received packets.
2050 * Maximum number of packets in array.
2053 * Number of packets successfully received.
2056 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2058 struct mrvl_rxq *q = rxq;
2059 struct pp2_ppio_desc descs[nb_pkts];
2060 struct pp2_bpool *bpool;
2061 int i, ret, rx_done = 0;
2063 struct pp2_hif *hif;
2064 unsigned int core_id = rte_lcore_id();
2066 hif = mrvl_get_hif(q->priv, core_id);
2068 if (unlikely(!q->priv->ppio || !hif))
2071 bpool = q->priv->bpool;
2073 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2074 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2075 if (unlikely(ret < 0)) {
2076 MRVL_LOG(ERR, "Failed to receive packets");
2079 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2081 for (i = 0; i < nb_pkts; i++) {
2082 struct rte_mbuf *mbuf;
2083 uint8_t l3_offset, l4_offset;
2084 enum pp2_inq_desc_status status;
2087 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2088 struct pp2_ppio_desc *pref_desc;
2091 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2092 pref_addr = cookie_addr_high |
2093 pp2_ppio_inq_desc_get_cookie(pref_desc);
2094 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2095 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2098 addr = cookie_addr_high |
2099 pp2_ppio_inq_desc_get_cookie(&descs[i]);
2100 mbuf = (struct rte_mbuf *)addr;
2101 rte_pktmbuf_reset(mbuf);
2103 /* drop packet in case of mac, overrun or resource error */
2104 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2105 if (unlikely(status != PP2_DESC_ERR_OK)) {
2106 struct pp2_buff_inf binf = {
2107 .addr = rte_mbuf_data_iova_default(mbuf),
2108 .cookie = (pp2_cookie_t)(uint64_t)mbuf,
2111 pp2_bpool_put_buff(hif, bpool, &binf);
2112 mrvl_port_bpool_size
2113 [bpool->pp2_id][bpool->id][core_id]++;
2118 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2119 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2120 mbuf->data_len = mbuf->pkt_len;
2121 mbuf->port = q->port_id;
2123 mrvl_desc_to_packet_type_and_offset(&descs[i],
2126 mbuf->l2_len = l3_offset;
2127 mbuf->l3_len = l4_offset - l3_offset;
2129 if (likely(q->cksum_enabled))
2130 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
2132 rx_pkts[rx_done++] = mbuf;
2133 q->bytes_recv += mbuf->pkt_len;
2136 if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2137 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
2139 if (unlikely(num <= q->priv->bpool_min_size ||
2140 (!rx_done && num < q->priv->bpool_init_size))) {
2141 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
2143 MRVL_LOG(ERR, "Failed to fill bpool");
2144 } else if (unlikely(num > q->priv->bpool_max_size)) {
2146 int pkt_to_remove = num - q->priv->bpool_init_size;
2147 struct rte_mbuf *mbuf;
2148 struct pp2_buff_inf buff;
2151 "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
2152 bpool->pp2_id, q->priv->ppio->port_id,
2153 bpool->id, pkt_to_remove, num,
2154 q->priv->bpool_init_size);
2156 for (i = 0; i < pkt_to_remove; i++) {
2157 ret = pp2_bpool_get_buff(hif, bpool, &buff);
2160 mbuf = (struct rte_mbuf *)
2161 (cookie_addr_high | buff.cookie);
2162 rte_pktmbuf_free(mbuf);
2164 mrvl_port_bpool_size
2165 [bpool->pp2_id][bpool->id][core_id] -= i;
2167 rte_spinlock_unlock(&q->priv->lock);
2174 * Prepare offload information.
2178 * @param packet_type
2179 * Packet type bitfield.
2181 * Pointer to the pp2_outq_l3_type enum.
2183 * Pointer to the pp2_outq_l4_type enum.
2184 * @param gen_l3_cksum
2185 * Will be set to 1 in case l3 checksum is computed.
2187 * Will be set to 1 in case l4 checksum is computed.
2190 * 0 on success, negative error value otherwise.
2193 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2194 enum pp2_outq_l3_type *l3_type,
2195 enum pp2_outq_l4_type *l4_type,
2200 * Based on ol_flags, prepare the information
2201 * for pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
2204 if (ol_flags & PKT_TX_IPV4) {
2205 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2206 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2207 } else if (ol_flags & PKT_TX_IPV6) {
2208 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2209 /* no checksum for ipv6 header */
2212 /* if something different, stop processing */
2216 ol_flags &= PKT_TX_L4_MASK;
2217 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2218 ol_flags == PKT_TX_TCP_CKSUM) {
2219 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2221 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2222 ol_flags == PKT_TX_UDP_CKSUM) {
2223 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2226 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2227 /* no checksum for other type */
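/*
 * Example: an mbuf carrying PKT_TX_IPV4 | PKT_TX_IP_CKSUM |
 * PKT_TX_UDP_CKSUM with RTE_PTYPE_L4_UDP in packet_type comes out as
 * l3_type == PP2_OUTQ_L3_TYPE_IPV4, gen_l3_cksum == 1,
 * l4_type == PP2_OUTQ_L4_TYPE_UDP, gen_l4_cksum == 1.
 */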
2235 * Release already sent buffers to bpool (buffer-pool).
2238 * Pointer to the port structure.
2240 * Pointer to the MUSDK hardware interface.
2242 * Pointer to the shadow queue.
2246 * Force releasing packets.
2249 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2250 unsigned int core_id, struct mrvl_shadow_txq *sq,
2253 struct buff_release_entry *entry;
2254 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2257 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2259 sq->num_to_release += nb_done;
2261 if (likely(!force &&
2262 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2265 nb_done = sq->num_to_release;
2266 sq->num_to_release = 0;
2268 for (i = 0; i < nb_done; i++) {
2269 entry = &sq->ent[sq->tail + num];
2270 if (unlikely(!entry->buff.addr)) {
2272 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2273 sq->tail, (u64)entry->buff.cookie,
2274 (u64)entry->buff.addr);
2279 if (unlikely(!entry->bpool)) {
2280 struct rte_mbuf *mbuf;
2282 mbuf = (struct rte_mbuf *)
2283 (cookie_addr_high | entry->buff.cookie);
2284 rte_pktmbuf_free(mbuf);
2289 mrvl_port_bpool_size
2290 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2292 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2297 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2299 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2306 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2307 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2313 * DPDK callback for transmit.
2316 * Generic pointer to the transmit queue.
2318 * Packets to transmit.
2320 * Number of packets in array.
2323 * Number of packets successfully transmitted.
2326 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2328 struct mrvl_txq *q = txq;
2329 struct mrvl_shadow_txq *sq;
2330 struct pp2_hif *hif;
2331 struct pp2_ppio_desc descs[nb_pkts];
2332 unsigned int core_id = rte_lcore_id();
2333 int i, ret, bytes_sent = 0;
2334 uint16_t num, sq_free_size;
2337 hif = mrvl_get_hif(q->priv, core_id);
2338 sq = &q->shadow_txqs[core_id];
2340 if (unlikely(!q->priv->ppio || !hif))
2344 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2345 sq, q->queue_id, 0);
2347 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
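/*
 * One slot is deliberately kept free (hence the "- 1") so that
 * head == tail can always be read as "ring empty": with a ring of
 * MRVL_PP2_TX_SHADOWQ_SIZE entries, at most MRVL_PP2_TX_SHADOWQ_SIZE - 1
 * are ever in flight per lcore.
 */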
2348 if (unlikely(nb_pkts > sq_free_size)) {
2350 "No room in shadow queue for %d packets! %d packets will be sent.",
2351 nb_pkts, sq_free_size);
2352 nb_pkts = sq_free_size;
2355 for (i = 0; i < nb_pkts; i++) {
2356 struct rte_mbuf *mbuf = tx_pkts[i];
2357 int gen_l3_cksum, gen_l4_cksum;
2358 enum pp2_outq_l3_type l3_type;
2359 enum pp2_outq_l4_type l4_type;
2361 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2362 struct rte_mbuf *pref_pkt_hdr;
2364 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2365 rte_mbuf_prefetch_part1(pref_pkt_hdr);
2366 rte_mbuf_prefetch_part2(pref_pkt_hdr);
2369 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2370 sq->ent[sq->head].buff.addr =
2371 rte_mbuf_data_iova_default(mbuf);
2372 sq->ent[sq->head].bpool =
2373 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
2374 mbuf->refcnt > 1)) ? NULL :
2375 mrvl_port_to_bpool_lookup[mbuf->port];
2376 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2379 pp2_ppio_outq_desc_reset(&descs[i]);
2380 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2381 rte_pktmbuf_iova(mbuf));
2382 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2383 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2384 rte_pktmbuf_pkt_len(mbuf));
2386 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2388 * in case unsupported ol_flags were passed
2389 * do not update descriptor offload information
2391 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2392 &l3_type, &l4_type, &gen_l3_cksum,
2397 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2399 mbuf->l2_len + mbuf->l3_len,
2400 gen_l3_cksum, gen_l4_cksum);
2404 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2405 /* number of packets that were not sent */
2406 if (unlikely(num > nb_pkts)) {
2407 for (i = nb_pkts; i < num; i++) {
2408 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2409 MRVL_PP2_TX_SHADOWQ_MASK;
2410 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2412 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2414 sq->size -= num - nb_pkts;
2417 q->bytes_sent += bytes_sent;
2423 * Initialize packet processor.
2426 * 0 on success, negative error value otherwise.
2431 struct pp2_init_params init_params;
2433 memset(&init_params, 0, sizeof(init_params));
2434 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2435 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2436 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2438 return pp2_init(&init_params);
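/*
 * The reserved maps are bitmasks of resources kept away from this PMD
 * (typically left to the kernel driver): e.g. hif_reserved_map == 0x0F
 * withholds hifs 0-3 and bm_pool_reserved_map == 0x07 withholds bpools
 * 0-2, which is why mrvl_reserve_bit() starts allocating above them.
 */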
2442 * Deinitialize packet processor.
2445 * 0 on success, negative error value otherwise.
2448 mrvl_deinit_pp2(void)
2454 * Create private device structure.
2457 * Pointer to the port name passed in the initialization parameters.
2460 * Pointer to the newly allocated private device structure.
2462 static struct mrvl_priv *
2463 mrvl_priv_create(const char *dev_name)
2465 struct pp2_bpool_params bpool_params;
2466 char match[MRVL_MATCH_LEN];
2467 struct mrvl_priv *priv;
2470 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2474 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2475 &priv->pp_id, &priv->ppio_id);
2479 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2480 PP2_BPOOL_NUM_POOLS);
2483 priv->bpool_bit = bpool_bit;
2485 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2487 memset(&bpool_params, 0, sizeof(bpool_params));
2488 bpool_params.match = match;
2489 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2490 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2492 goto out_clear_bpool_bit;
2494 priv->ppio_params.type = PP2_PPIO_T_NIC;
2495 rte_spinlock_init(&priv->lock);
2498 out_clear_bpool_bit:
2499 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2506 * Create device representing Ethernet port.
2509 * Pointer to the port's name.
2512 * 0 on success, negative error value otherwise.
2515 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2517 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2518 struct rte_eth_dev *eth_dev;
2519 struct mrvl_priv *priv;
2522 eth_dev = rte_eth_dev_allocate(name);
2526 priv = mrvl_priv_create(name);
2532 eth_dev->data->mac_addrs =
2533 rte_zmalloc("mac_addrs",
2534 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2535 if (!eth_dev->data->mac_addrs) {
2536 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
2541 memset(&req, 0, sizeof(req));
2542 snprintf(req.ifr_name, sizeof(req.ifr_name), "%s", name);
2543 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2547 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2548 req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2550 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2551 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2552 eth_dev->data->kdrv = RTE_KDRV_NONE;
2553 eth_dev->data->dev_private = priv;
2554 eth_dev->device = &vdev->device;
2555 eth_dev->dev_ops = &mrvl_ops;
2557 rte_eth_dev_probing_finish(eth_dev);
2560 rte_free(eth_dev->data->mac_addrs);
2562 rte_eth_dev_release_port(eth_dev);
2570 * Cleanup previously created device representing Ethernet port.
2573 * Pointer to the port name.
2576 mrvl_eth_dev_destroy(const char *name)
2578 struct rte_eth_dev *eth_dev;
2579 struct mrvl_priv *priv;
2581 eth_dev = rte_eth_dev_allocated(name);
2585 priv = eth_dev->data->dev_private;
2586 pp2_bpool_deinit(priv->bpool);
2587 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2589 rte_free(eth_dev->data->mac_addrs);
2590 rte_eth_dev_release_port(eth_dev);
2594 * Callback used by rte_kvargs_process() during argument parsing.
2597 * Pointer to the parsed key (unused).
2599 * Pointer to the parsed value.
2601 * Pointer to the extra arguments which contains address of the
2602 * table of pointers to parsed interface names.
2608 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2611 struct mrvl_ifnames *ifnames = extra_args;
2613 ifnames->names[ifnames->idx++] = value;
2619 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2622 mrvl_deinit_hifs(void)
2626 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2628 pp2_hif_deinit(hifs[i]);
2630 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2631 memset(hifs, 0, sizeof(hifs));
2635 * DPDK callback to register the virtual device.
2638 * Pointer to the virtual device.
2641 * 0 on success, negative error value otherwise.
2644 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2646 struct rte_kvargs *kvlist;
2647 struct mrvl_ifnames ifnames;
2649 uint32_t i, ifnum, cfgnum;
2652 params = rte_vdev_device_args(vdev);
2656 kvlist = rte_kvargs_parse(params, valid_args);
2660 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2661 if (ifnum > RTE_DIM(ifnames.names))
2662 goto out_free_kvlist;
2665 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2666 mrvl_get_ifnames, &ifnames);
2670 * The below system initialization should be done only once,
2671 * on the first provided configuration file
2673 if (!mrvl_qos_cfg) {
2674 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2675 MRVL_LOG(INFO, "Parsing config file!");
2677 MRVL_LOG(ERR, "Cannot handle more than one config file!");
2678 goto out_free_kvlist;
2679 } else if (cfgnum == 1) {
2680 rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2681 mrvl_get_qoscfg, &mrvl_qos_cfg);
2688 MRVL_LOG(INFO, "Perform MUSDK initializations");
2690 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
2692 goto out_free_kvlist;
2694 ret = mrvl_init_pp2();
2696 MRVL_LOG(ERR, "Failed to init PP!");
2697 rte_mvep_deinit(MVEP_MOD_T_PP2);
2698 goto out_free_kvlist;
2701 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2702 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
2704 mrvl_lcore_first = RTE_MAX_LCORE;
2705 mrvl_lcore_last = 0;
2708 for (i = 0; i < ifnum; i++) {
2709 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
2710 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2714 mrvl_dev_num += ifnum;
2716 rte_kvargs_free(kvlist);
2721 mrvl_eth_dev_destroy(ifnames.names[i]);
2723 if (mrvl_dev_num == 0) {
2725 rte_mvep_deinit(MVEP_MOD_T_PP2);
2728 rte_kvargs_free(kvlist);
2734 * DPDK callback to remove virtual device.
2737 * Pointer to the removed virtual device.
2740 * 0 on success, negative error value otherwise.
2743 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2748 name = rte_vdev_device_name(vdev);
2752 MRVL_LOG(INFO, "Removing %s", name);
2754 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
2755 char ifname[RTE_ETH_NAME_MAX_LEN];
2757 rte_eth_dev_get_name_by_port(i, ifname);
2758 mrvl_eth_dev_destroy(ifname);
2762 if (mrvl_dev_num == 0) {
2763 MRVL_LOG(INFO, "Perform MUSDK deinit");
2766 rte_mvep_deinit(MVEP_MOD_T_PP2);
2772 static struct rte_vdev_driver pmd_mrvl_drv = {
2773 .probe = rte_pmd_mrvl_probe,
2774 .remove = rte_pmd_mrvl_remove,
2777 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
2778 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
2780 RTE_INIT(mrvl_init_log)
2782 mrvl_logtype = rte_log_register("pmd.net.mvpp2");
2783 if (mrvl_logtype >= 0)
2784 rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);