1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2017 Marvell International Ltd.
3 * Copyright(c) 2017 Semihalf.
7 #include <rte_ethdev_driver.h>
8 #include <rte_kvargs.h>
10 #include <rte_malloc.h>
11 #include <rte_bus_vdev.h>
14 #include <linux/ethtool.h>
15 #include <linux/sockios.h>
17 #include <net/if_arp.h>
18 #include <sys/ioctl.h>
19 #include <sys/socket.h>
21 #include <sys/types.h>
23 #include <rte_mvep_common.h>
24 #include "mrvl_ethdev.h"
26 #include "mrvl_flow.h"
30 /* bitmask with reserved hifs */
31 #define MRVL_MUSDK_HIFS_RESERVED 0x0F
32 /* bitmask with reserved bpools */
33 #define MRVL_MUSDK_BPOOLS_RESERVED 0x07
34 /* bitmask with reserved kernel RSS tables */
35 #define MRVL_MUSDK_RSS_RESERVED 0x01
36 /* maximum number of available hifs */
37 #define MRVL_MUSDK_HIFS_MAX 9
40 #define MRVL_MUSDK_PREFETCH_SHIFT 2
42 /* TCAM has 25 entries reserved for uc/mc filter entries */
43 #define MRVL_MAC_ADDRS_MAX 25
44 #define MRVL_MATCH_LEN 16
45 #define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
46 /* Maximum allowable packet size */
47 #define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)
49 #define MRVL_IFACE_NAME_ARG "iface"
50 #define MRVL_CFG_ARG "cfg"
52 #define MRVL_BURST_SIZE 64
54 #define MRVL_ARP_LENGTH 28
56 #define MRVL_COOKIE_ADDR_INVALID ~0ULL
58 #define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
59 #define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
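/*
 * A MUSDK cookie is only sizeof(pp2_cookie_t) bytes wide, too small to hold a
 * full 64-bit mbuf virtual address. The driver therefore assumes all mbufs
 * share the same upper address bits, caches them once in cookie_addr_high and
 * rebuilds pointers on the rx/release paths roughly as:
 *
 *   struct rte_mbuf *mbuf = (struct rte_mbuf *)(cookie_addr_high | cookie);
 *
 * mrvl_fill_bpool() verifies this assumption for every buffer it hands to the
 * hardware pool.
 */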
61 /** Port Rx offload capabilities */
62 #define MRVL_RX_OFFLOADS (DEV_RX_OFFLOAD_VLAN_FILTER | \
63 DEV_RX_OFFLOAD_JUMBO_FRAME | \
64 DEV_RX_OFFLOAD_CHECKSUM)
66 /** Port Tx offload capabilities */
67 #define MRVL_TX_OFFLOADS (DEV_TX_OFFLOAD_IPV4_CKSUM | \
68 DEV_TX_OFFLOAD_UDP_CKSUM | \
69 DEV_TX_OFFLOAD_TCP_CKSUM)
71 static const char * const valid_args[] = {
77 static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
78 static struct pp2_hif *hifs[RTE_MAX_LCORE];
79 static int used_bpools[PP2_NUM_PKT_PROC] = {
80 [0 ... PP2_NUM_PKT_PROC - 1] = MRVL_MUSDK_BPOOLS_RESERVED
83 static struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
84 static int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
85 static uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
90 const char *names[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
95 * To use buffer harvesting based on the loopback port, a shadow queue
96 * structure was introduced to keep track of buffer information.
98 * Before a packet is sent, the related buffer information (pp2_buff_inf)
99 * is stored in the shadow queue. Once the packet has been transmitted,
100 * the no longer needed buffer is released back to its original hardware
101 * pool, provided it originated from an interface.
102 * If it was generated by the application itself, i.e. the mbuf->port
103 * field is 0xff, it is released to the software mempool instead.
105 struct mrvl_shadow_txq {
106 int head; /* write index - used when sending buffers */
107 int tail; /* read index - used when releasing buffers */
108 u16 size; /* queue occupied size */
109 u16 num_to_release; /* number of buffers sent, that can be released */
110 struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
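/*
 * The shadow queue is a power-of-two ring: head and tail advance with
 * (index + n) & MRVL_PP2_TX_SHADOWQ_MASK, "size" tracks occupancy, and the
 * tx burst refuses to enqueue more than MRVL_PP2_TX_SHADOWQ_SIZE - size - 1
 * new entries (see mrvl_tx_pkt_burst()).
 */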
114 struct mrvl_priv *priv;
115 struct rte_mempool *mp;
124 struct mrvl_priv *priv;
128 struct mrvl_shadow_txq shadow_txqs[RTE_MAX_LCORE];
129 int tx_deferred_start;
132 static int mrvl_lcore_first;
133 static int mrvl_lcore_last;
134 static int mrvl_dev_num;
136 static int mrvl_fill_bpool(struct mrvl_rxq *rxq, int num);
137 static inline void mrvl_free_sent_buffers(struct pp2_ppio *ppio,
138 struct pp2_hif *hif, unsigned int core_id,
139 struct mrvl_shadow_txq *sq, int qid, int force);
141 #define MRVL_XSTATS_TBL_ENTRY(name) { \
142 #name, offsetof(struct pp2_ppio_statistics, name), \
143 sizeof(((struct pp2_ppio_statistics *)0)->name) \
146 /* Table with xstats data */
151 } mrvl_xstats_tbl[] = {
152 MRVL_XSTATS_TBL_ENTRY(rx_bytes),
153 MRVL_XSTATS_TBL_ENTRY(rx_packets),
154 MRVL_XSTATS_TBL_ENTRY(rx_unicast_packets),
155 MRVL_XSTATS_TBL_ENTRY(rx_errors),
156 MRVL_XSTATS_TBL_ENTRY(rx_fullq_dropped),
157 MRVL_XSTATS_TBL_ENTRY(rx_bm_dropped),
158 MRVL_XSTATS_TBL_ENTRY(rx_early_dropped),
159 MRVL_XSTATS_TBL_ENTRY(rx_fifo_dropped),
160 MRVL_XSTATS_TBL_ENTRY(rx_cls_dropped),
161 MRVL_XSTATS_TBL_ENTRY(tx_bytes),
162 MRVL_XSTATS_TBL_ENTRY(tx_packets),
163 MRVL_XSTATS_TBL_ENTRY(tx_unicast_packets),
164 MRVL_XSTATS_TBL_ENTRY(tx_errors)
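/*
 * MRVL_XSTATS_TBL_ENTRY() records the stat name, its byte offset inside
 * struct pp2_ppio_statistics and the member width; the
 * sizeof(((struct pp2_ppio_statistics *)0)->name) idiom measures a member
 * without needing an object, so mrvl_xstats_get() can pick 32-bit vs 64-bit
 * reads at runtime.
 */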
168 mrvl_get_bpool_size(int pp2_id, int pool_id)
173 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
174 size += mrvl_port_bpool_size[pp2_id][pool_id][i];
180 mrvl_reserve_bit(int *bitmap, int max)
182 int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
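/*
 * __builtin_clz() counts leading zero bits, so n is the position just above
 * the highest bit already set in *bitmap. Because bits are handed out
 * bottom-up starting from the reserved mask, that is the next free slot;
 * e.g. for used_hifs = 0x0F on a 32-bit int, n = 32 - 28 = 4, the first
 * non-reserved hif.
 */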
193 mrvl_init_hif(int core_id)
195 struct pp2_hif_params params;
196 char match[MRVL_MATCH_LEN];
199 ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
201 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
205 snprintf(match, sizeof(match), "hif-%d", ret);
206 memset(&params, 0, sizeof(params));
207 params.match = match;
208 params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
209 ret = pp2_hif_init(&params, &hifs[core_id]);
211 MRVL_LOG(ERR, "Failed to initialize hif %d", core_id);
218 static inline struct pp2_hif*
219 mrvl_get_hif(struct mrvl_priv *priv, int core_id)
223 if (likely(hifs[core_id] != NULL))
224 return hifs[core_id];
226 rte_spinlock_lock(&priv->lock);
228 ret = mrvl_init_hif(core_id);
230 MRVL_LOG(ERR, "Failed to allocate hif %d", core_id);
234 if (core_id < mrvl_lcore_first)
235 mrvl_lcore_first = core_id;
237 if (core_id > mrvl_lcore_last)
238 mrvl_lcore_last = core_id;
240 rte_spinlock_unlock(&priv->lock);
242 return hifs[core_id];
246 * Configure RSS based on the DPDK RSS configuration.
249 * Pointer to private structure.
251 * Pointer to RSS configuration.
254 * 0 on success, negative error value otherwise.
257 mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
259 if (rss_conf->rss_key)
260 MRVL_LOG(WARNING, "Changing hash key is not supported");
262 if (rss_conf->rss_hf == 0) {
263 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
264 } else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
265 priv->ppio_params.inqs_params.hash_type =
266 PP2_PPIO_HASH_T_2_TUPLE;
267 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
268 priv->ppio_params.inqs_params.hash_type =
269 PP2_PPIO_HASH_T_5_TUPLE;
270 priv->rss_hf_tcp = 1;
271 } else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
272 priv->ppio_params.inqs_params.hash_type =
273 PP2_PPIO_HASH_T_5_TUPLE;
274 priv->rss_hf_tcp = 0;
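/*
 * Summary of the mapping above: rss_hf == 0 disables hashing, ETH_RSS_IPV4
 * selects a 2-tuple hash (presumably IP src/dst), and the NONFRAG_IPV4_TCP /
 * NONFRAG_IPV4_UDP flags select a 5-tuple hash with rss_hf_tcp remembering
 * which L4 protocol was requested. Only one hash type is programmed; the
 * first matching flag in the chain wins.
 */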
283 * Ethernet device configuration.
285 * Prepare the driver for a given number of TX and RX queues and
289 * Pointer to Ethernet device structure.
292 * 0 on success, negative error value otherwise.
295 mrvl_dev_configure(struct rte_eth_dev *dev)
297 struct mrvl_priv *priv = dev->data->dev_private;
301 MRVL_LOG(INFO, "Device reconfiguration is not supported");
305 if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
306 dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
307 MRVL_LOG(INFO, "Unsupported rx multi queue mode %d",
308 dev->data->dev_conf.rxmode.mq_mode);
312 if (dev->data->dev_conf.rxmode.split_hdr_size) {
313 MRVL_LOG(INFO, "Split headers not supported");
317 if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
318 dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
319 MRVL_PP2_ETH_HDRS_LEN;
321 ret = mrvl_configure_rxqs(priv, dev->data->port_id,
322 dev->data->nb_rx_queues);
326 ret = mrvl_configure_txqs(priv, dev->data->port_id,
327 dev->data->nb_tx_queues);
331 priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
332 priv->ppio_params.maintain_stats = 1;
333 priv->nb_rx_queues = dev->data->nb_rx_queues;
335 ret = mrvl_tm_init(dev);
339 if (dev->data->nb_rx_queues == 1 &&
340 dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
341 MRVL_LOG(WARNING, "Disabling hash for 1 rx queue");
342 priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
347 return mrvl_configure_rss(priv,
348 &dev->data->dev_conf.rx_adv_conf.rss_conf);
352 * DPDK callback to change the MTU.
354 * Setting the MTU affects hardware MRU (packets larger than the MRU
358 * Pointer to Ethernet device structure.
363 * 0 on success, negative error value otherwise.
366 mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
368 struct mrvl_priv *priv = dev->data->dev_private;
370 uint16_t mbuf_data_size = 0; /* SW buffer size */
373 mru = MRVL_PP2_MTU_TO_MRU(mtu);
375 * min_rx_buf_size is equal to mbuf data size
376 * if pmd didn't set it differently
378 mbuf_data_size = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM;
380 * - setting mru greater than the mbuf size resulting in
381 * hw and sw buffer size mismatch
382 * - setting mtu that requires the support of scattered packets
383 * when this feature has not been enabled/supported so far
384 * (TODO check scattered_rx flag here once scattered RX is supported).
386 if (mru + MRVL_PKT_OFFS > mbuf_data_size) {
387 mru = mbuf_data_size - MRVL_PKT_OFFS;
388 mtu = MRVL_PP2_MRU_TO_MTU(mru);
389 MRVL_LOG(WARNING, "MTU too big, max MTU possible limited "
390 "by current mbuf size: %u. Set MTU to %u, MRU to %u",
391 mbuf_data_size, mtu, mru);
394 if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX) {
395 MRVL_LOG(ERR, "Invalid MTU [%u] or MRU [%u]", mtu, mru);
399 dev->data->mtu = mtu;
400 dev->data->dev_conf.rxmode.max_rx_pkt_len = mru - MV_MH_SIZE;
405 ret = pp2_ppio_set_mru(priv->ppio, mru);
407 MRVL_LOG(ERR, "Failed to change MRU");
411 ret = pp2_ppio_set_mtu(priv->ppio, mtu);
413 MRVL_LOG(ERR, "Failed to change MTU");
421 * DPDK callback to bring the link up.
424 * Pointer to Ethernet device structure.
427 * 0 on success, negative error value otherwise.
430 mrvl_dev_set_link_up(struct rte_eth_dev *dev)
432 struct mrvl_priv *priv = dev->data->dev_private;
438 ret = pp2_ppio_enable(priv->ppio);
443 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
444 * as pp2_ppio_enable() changes port->t_mode from default 0 to
445 * PP2_TRAFFIC_INGRESS_EGRESS.
447 * Set mtu to default DPDK value here.
449 ret = mrvl_mtu_set(dev, dev->data->mtu);
451 pp2_ppio_disable(priv->ppio);
457 * DPDK callback to bring the link down.
460 * Pointer to Ethernet device structure.
463 * 0 on success, negative error value otherwise.
466 mrvl_dev_set_link_down(struct rte_eth_dev *dev)
468 struct mrvl_priv *priv = dev->data->dev_private;
473 return pp2_ppio_disable(priv->ppio);
477 * DPDK callback to start tx queue.
480 * Pointer to Ethernet device structure.
482 * Transmit queue index.
485 * 0 on success, negative error value otherwise.
488 mrvl_tx_queue_start(struct rte_eth_dev *dev, uint16_t queue_id)
490 struct mrvl_priv *priv = dev->data->dev_private;
496 /* passing 1 enables given tx queue */
497 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 1);
499 MRVL_LOG(ERR, "Failed to start txq %d", queue_id);
503 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
509 * DPDK callback to stop tx queue.
512 * Pointer to Ethernet device structure.
514 * Transmit queue index.
517 * 0 on success, negative error value otherwise.
520 mrvl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t queue_id)
522 struct mrvl_priv *priv = dev->data->dev_private;
528 /* passing 0 disables given tx queue */
529 ret = pp2_ppio_set_outq_state(priv->ppio, queue_id, 0);
531 MRVL_LOG(ERR, "Failed to stop txq %d", queue_id);
535 dev->data->tx_queue_state[queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
541 * DPDK callback to start the device.
544 * Pointer to Ethernet device structure.
547 * 0 on success, negative errno value on failure.
550 mrvl_dev_start(struct rte_eth_dev *dev)
552 struct mrvl_priv *priv = dev->data->dev_private;
553 char match[MRVL_MATCH_LEN];
554 int ret = 0, i, def_init_size;
557 return mrvl_dev_set_link_up(dev);
559 snprintf(match, sizeof(match), "ppio-%d:%d",
560 priv->pp_id, priv->ppio_id);
561 priv->ppio_params.match = match;
564 * Calculate the minimum bpool size for refill feature as follows:
565 * 2 default burst sizes multiplied by the number of rx queues.
566 * If the bpool size falls below this value, new buffers will
567 * be added to the pool.
569 priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
571 /* In case the initial bpool size configured in queue setup is
572 * smaller than the minimum size, add more buffers
574 def_init_size = priv->bpool_min_size + MRVL_BURST_SIZE * 2;
575 if (priv->bpool_init_size < def_init_size) {
576 int buffs_to_add = def_init_size - priv->bpool_init_size;
578 priv->bpool_init_size += buffs_to_add;
579 ret = mrvl_fill_bpool(dev->data->rx_queues[0], buffs_to_add);
581 MRVL_LOG(ERR, "Failed to add buffers to bpool");
585 * Calculate the maximum bpool size for refill feature as follows:
586 * the maximum number of descriptors in an rx queue multiplied by the
587 * number of rx queues plus the minimum bpool size.
588 * In case the bpool size exceeds this value, superfluous buffers
591 priv->bpool_max_size = (priv->nb_rx_queues * MRVL_PP2_RXD_MAX) +
592 priv->bpool_min_size;
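/*
 * Worked example with illustrative numbers: for 4 rx queues and
 * MRVL_BURST_SIZE = 64, bpool_min_size = 4 * 64 * 2 = 512 buffers and
 * def_init_size = 512 + 128 = 640, while bpool_max_size becomes
 * 4 * MRVL_PP2_RXD_MAX + 512. The rx path refills the pool when it drops
 * below the minimum and trims it back towards bpool_init_size when it grows
 * past the maximum.
 */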
594 ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
596 MRVL_LOG(ERR, "Failed to init ppio");
601 * In case there are some stale uc/mc mac addresses flush them
602 * here. It cannot be done during mrvl_dev_close() as port information
603 * is already gone at that point (due to pp2_ppio_deinit() in
606 if (!priv->uc_mc_flushed) {
607 ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
610 "Failed to flush uc/mc filter list");
613 priv->uc_mc_flushed = 1;
616 if (!priv->vlan_flushed) {
617 ret = pp2_ppio_flush_vlan(priv->ppio);
619 MRVL_LOG(ERR, "Failed to flush vlan list");
622 * once pp2_ppio_flush_vlan() is supported jump to out
626 priv->vlan_flushed = 1;
628 ret = mrvl_mtu_set(dev, dev->data->mtu);
630 MRVL_LOG(ERR, "Failed to set MTU to %d", dev->data->mtu);
632 /* For default QoS config, don't start classifier. */
634 mrvl_qos_cfg->port[dev->data->port_id].use_global_defaults == 0) {
635 ret = mrvl_start_qos_mapping(priv);
637 MRVL_LOG(ERR, "Failed to setup QoS mapping");
642 ret = mrvl_dev_set_link_up(dev);
644 MRVL_LOG(ERR, "Failed to set link up");
648 /* start tx queues */
649 for (i = 0; i < dev->data->nb_tx_queues; i++) {
650 struct mrvl_txq *txq = dev->data->tx_queues[i];
652 dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STARTED;
654 if (!txq->tx_deferred_start)
658 * All txqs are started by default. Stop them
659 * so that tx_deferred_start works as expected.
661 ret = mrvl_tx_queue_stop(dev, i);
671 MRVL_LOG(ERR, "Failed to start device");
672 pp2_ppio_deinit(priv->ppio);
677 * Flush receive queues.
680 * Pointer to Ethernet device structure.
683 mrvl_flush_rx_queues(struct rte_eth_dev *dev)
687 MRVL_LOG(INFO, "Flushing rx queues");
688 for (i = 0; i < dev->data->nb_rx_queues; i++) {
692 struct mrvl_rxq *q = dev->data->rx_queues[i];
693 struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];
695 num = MRVL_PP2_RXD_MAX;
696 ret = pp2_ppio_recv(q->priv->ppio,
697 q->priv->rxq_map[q->queue_id].tc,
698 q->priv->rxq_map[q->queue_id].inq,
699 descs, (uint16_t *)&num);
700 } while (ret == 0 && num);
705 * Flush transmit shadow queues.
708 * Pointer to Ethernet device structure.
711 mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
714 struct mrvl_txq *txq;
716 MRVL_LOG(INFO, "Flushing tx shadow queues");
717 for (i = 0; i < dev->data->nb_tx_queues; i++) {
718 txq = (struct mrvl_txq *)dev->data->tx_queues[i];
720 for (j = 0; j < RTE_MAX_LCORE; j++) {
721 struct mrvl_shadow_txq *sq;
726 sq = &txq->shadow_txqs[j];
727 mrvl_free_sent_buffers(txq->priv->ppio,
728 hifs[j], j, sq, txq->queue_id, 1);
729 while (sq->tail != sq->head) {
730 uint64_t addr = cookie_addr_high |
731 sq->ent[sq->tail].buff.cookie;
733 (struct rte_mbuf *)addr);
734 sq->tail = (sq->tail + 1) &
735 MRVL_PP2_TX_SHADOWQ_MASK;
737 memset(sq, 0, sizeof(*sq));
743 * Flush hardware bpool (buffer-pool).
746 * Pointer to Ethernet device structure.
749 mrvl_flush_bpool(struct rte_eth_dev *dev)
751 struct mrvl_priv *priv = dev->data->dev_private;
755 unsigned int core_id = rte_lcore_id();
757 if (core_id == LCORE_ID_ANY)
760 hif = mrvl_get_hif(priv, core_id);
762 ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
764 MRVL_LOG(ERR, "Failed to get bpool buffers number");
769 struct pp2_buff_inf inf;
772 ret = pp2_bpool_get_buff(hif, priv->bpool, &inf);
776 addr = cookie_addr_high | inf.cookie;
777 rte_pktmbuf_free((struct rte_mbuf *)addr);
782 * DPDK callback to stop the device.
785 * Pointer to Ethernet device structure.
788 mrvl_dev_stop(struct rte_eth_dev *dev)
790 mrvl_dev_set_link_down(dev);
794 * DPDK callback to close the device.
797 * Pointer to Ethernet device structure.
800 mrvl_dev_close(struct rte_eth_dev *dev)
802 struct mrvl_priv *priv = dev->data->dev_private;
805 mrvl_flush_rx_queues(dev);
806 mrvl_flush_tx_shadow_queues(dev);
807 mrvl_flow_deinit(dev);
808 mrvl_mtr_deinit(dev);
810 for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
811 struct pp2_ppio_tc_params *tc_params =
812 &priv->ppio_params.inqs_params.tcs_params[i];
814 if (tc_params->inqs_params) {
815 rte_free(tc_params->inqs_params);
816 tc_params->inqs_params = NULL;
821 pp2_cls_tbl_deinit(priv->cls_tbl);
822 priv->cls_tbl = NULL;
826 pp2_cls_qos_tbl_deinit(priv->qos_tbl);
827 priv->qos_tbl = NULL;
830 mrvl_flush_bpool(dev);
834 pp2_ppio_deinit(priv->ppio);
838 /* policer must be released after ppio deinitialization */
839 if (priv->default_policer) {
840 pp2_cls_plcr_deinit(priv->default_policer);
841 priv->default_policer = NULL;
846 * DPDK callback to retrieve physical link information.
849 * Pointer to Ethernet device structure.
850 * @param wait_to_complete
851 * Wait for request completion (ignored).
854 * 0 on success, negative error value otherwise.
857 mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
861 * once MUSDK provides necessary API use it here
863 struct mrvl_priv *priv = dev->data->dev_private;
864 struct ethtool_cmd edata;
866 int ret, fd, link_up;
871 edata.cmd = ETHTOOL_GSET;
873 strcpy(req.ifr_name, dev->data->name);
874 req.ifr_data = (void *)&edata;
876 fd = socket(AF_INET, SOCK_DGRAM, 0);
880 ret = ioctl(fd, SIOCETHTOOL, &req);
888 switch (ethtool_cmd_speed(&edata)) {
890 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
893 dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
896 dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
899 dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
902 dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
905 dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
906 ETH_LINK_HALF_DUPLEX;
907 dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
909 pp2_ppio_get_link_state(priv->ppio, &link_up);
910 dev->data->dev_link.link_status = link_up ? ETH_LINK_UP : ETH_LINK_DOWN;
916 * DPDK callback to enable promiscuous mode.
919 * Pointer to Ethernet device structure.
922 mrvl_promiscuous_enable(struct rte_eth_dev *dev)
924 struct mrvl_priv *priv = dev->data->dev_private;
933 ret = pp2_ppio_set_promisc(priv->ppio, 1);
935 MRVL_LOG(ERR, "Failed to enable promiscuous mode");
939 * DPDK callback to enable allmulticast mode.
942 * Pointer to Ethernet device structure.
945 mrvl_allmulticast_enable(struct rte_eth_dev *dev)
947 struct mrvl_priv *priv = dev->data->dev_private;
956 ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
958 MRVL_LOG(ERR, "Failed to enable all-multicast mode");
962 * DPDK callback to disable promiscuous mode.
965 * Pointer to Ethernet device structure.
968 mrvl_promiscuous_disable(struct rte_eth_dev *dev)
970 struct mrvl_priv *priv = dev->data->dev_private;
976 ret = pp2_ppio_set_promisc(priv->ppio, 0);
978 MRVL_LOG(ERR, "Failed to disable promiscuous mode");
982 * DPDK callback to disable allmulticast mode.
985 * Pointer to Ethernet device structure.
988 mrvl_allmulticast_disable(struct rte_eth_dev *dev)
990 struct mrvl_priv *priv = dev->data->dev_private;
996 ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
998 MRVL_LOG(ERR, "Failed to disable all-multicast mode");
1002 * DPDK callback to remove a MAC address.
1005 * Pointer to Ethernet device structure.
1007 * MAC address index.
1010 mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
1012 struct mrvl_priv *priv = dev->data->dev_private;
1013 char buf[ETHER_ADDR_FMT_SIZE];
1022 ret = pp2_ppio_remove_mac_addr(priv->ppio,
1023 dev->data->mac_addrs[index].addr_bytes);
1025 ether_format_addr(buf, sizeof(buf),
1026 &dev->data->mac_addrs[index]);
1027 MRVL_LOG(ERR, "Failed to remove mac %s", buf);
1032 * DPDK callback to add a MAC address.
1035 * Pointer to Ethernet device structure.
1037 * MAC address to register.
1039 * MAC address index.
1041 * VMDq pool index to associate address with (unused).
1044 * 0 on success, negative error value otherwise.
1047 mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
1048 uint32_t index, uint32_t vmdq __rte_unused)
1050 struct mrvl_priv *priv = dev->data->dev_private;
1051 char buf[ETHER_ADDR_FMT_SIZE];
1058 /* For setting index 0, mrvl_mac_addr_set() should be used. */
1065 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
1066 * parameter uc_filter_max. Maximum number of mc addresses is then
1067 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
1070 * If more than uc_filter_max uc addresses are added to the filter list
1071 * then the NIC will switch to promiscuous mode automatically.
1073 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are added
1074 * to the filter list then the NIC will switch to all-multicast mode
1077 ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
1079 ether_format_addr(buf, sizeof(buf), mac_addr);
1080 MRVL_LOG(ERR, "Failed to add mac %s", buf);
1088 * DPDK callback to set the primary MAC address.
1091 * Pointer to Ethernet device structure.
1093 * MAC address to register.
1096 * 0 on success, negative error value otherwise.
1099 mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
1101 struct mrvl_priv *priv = dev->data->dev_private;
1110 ret = pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
1112 char buf[ETHER_ADDR_FMT_SIZE];
1113 ether_format_addr(buf, sizeof(buf), mac_addr);
1114 MRVL_LOG(ERR, "Failed to set mac to %s", buf);
1121 * DPDK callback to get device statistics.
1124 * Pointer to Ethernet device structure.
1126 * Stats structure output buffer.
1129 * 0 on success, negative error value otherwise.
1132 mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1134 struct mrvl_priv *priv = dev->data->dev_private;
1135 struct pp2_ppio_statistics ppio_stats;
1136 uint64_t drop_mac = 0;
1137 unsigned int i, idx, ret;
1142 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1143 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1144 struct pp2_ppio_inq_statistics rx_stats;
1149 idx = rxq->queue_id;
1150 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1152 "rx queue %d stats out of range (0 - %d)",
1153 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1157 ret = pp2_ppio_inq_get_statistics(priv->ppio,
1158 priv->rxq_map[idx].tc,
1159 priv->rxq_map[idx].inq,
1161 if (unlikely(ret)) {
1163 "Failed to update rx queue %d stats", idx);
1167 stats->q_ibytes[idx] = rxq->bytes_recv;
1168 stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
1169 stats->q_errors[idx] = rx_stats.drop_early +
1170 rx_stats.drop_fullq +
1173 stats->ibytes += rxq->bytes_recv;
1174 drop_mac += rxq->drop_mac;
1177 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1178 struct mrvl_txq *txq = dev->data->tx_queues[i];
1179 struct pp2_ppio_outq_statistics tx_stats;
1184 idx = txq->queue_id;
1185 if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
1187 "tx queue %d stats out of range (0 - %d)",
1188 idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
1191 ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
1193 if (unlikely(ret)) {
1195 "Failed to update tx queue %d stats", idx);
1199 stats->q_opackets[idx] = tx_stats.deq_desc;
1200 stats->q_obytes[idx] = txq->bytes_sent;
1201 stats->obytes += txq->bytes_sent;
1204 ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1205 if (unlikely(ret)) {
1206 MRVL_LOG(ERR, "Failed to update port statistics");
1210 stats->ipackets += ppio_stats.rx_packets - drop_mac;
1211 stats->opackets += ppio_stats.tx_packets;
1212 stats->imissed += ppio_stats.rx_fullq_dropped +
1213 ppio_stats.rx_bm_dropped +
1214 ppio_stats.rx_early_dropped +
1215 ppio_stats.rx_fifo_dropped +
1216 ppio_stats.rx_cls_dropped;
1217 stats->ierrors = drop_mac;
1223 * DPDK callback to clear device statistics.
1226 * Pointer to Ethernet device structure.
1229 mrvl_stats_reset(struct rte_eth_dev *dev)
1231 struct mrvl_priv *priv = dev->data->dev_private;
1237 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1238 struct mrvl_rxq *rxq = dev->data->rx_queues[i];
1240 pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
1241 priv->rxq_map[i].inq, NULL, 1);
1242 rxq->bytes_recv = 0;
1246 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1247 struct mrvl_txq *txq = dev->data->tx_queues[i];
1249 pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
1250 txq->bytes_sent = 0;
1253 pp2_ppio_get_statistics(priv->ppio, NULL, 1);
1257 * DPDK callback to get extended statistics.
1260 * Pointer to Ethernet device structure.
1262 * Pointer to xstats table.
1264 * Number of entries in xstats table.
1266 * Negative value on error, number of read xstats otherwise.
1269 mrvl_xstats_get(struct rte_eth_dev *dev,
1270 struct rte_eth_xstat *stats, unsigned int n)
1272 struct mrvl_priv *priv = dev->data->dev_private;
1273 struct pp2_ppio_statistics ppio_stats;
1279 pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
1280 for (i = 0; i < n && i < RTE_DIM(mrvl_xstats_tbl); i++) {
1283 if (mrvl_xstats_tbl[i].size == sizeof(uint32_t))
1284 val = *(uint32_t *)((uint8_t *)&ppio_stats +
1285 mrvl_xstats_tbl[i].offset);
1286 else if (mrvl_xstats_tbl[i].size == sizeof(uint64_t))
1287 val = *(uint64_t *)((uint8_t *)&ppio_stats +
1288 mrvl_xstats_tbl[i].offset);
1293 stats[i].value = val;
1300 * DPDK callback to reset extended statistics.
1303 * Pointer to Ethernet device structure.
1306 mrvl_xstats_reset(struct rte_eth_dev *dev)
1308 mrvl_stats_reset(dev);
1312 * DPDK callback to get extended statistics names.
1314 * @param dev (unused)
1315 * Pointer to Ethernet device structure.
1316 * @param xstats_names
1317 * Pointer to xstats names table.
1319 * Size of the xstats names table.
1321 * Number of read names.
1324 mrvl_xstats_get_names(struct rte_eth_dev *dev __rte_unused,
1325 struct rte_eth_xstat_name *xstats_names,
1331 return RTE_DIM(mrvl_xstats_tbl);
1333 for (i = 0; i < size && i < RTE_DIM(mrvl_xstats_tbl); i++)
1334 snprintf(xstats_names[i].name, RTE_ETH_XSTATS_NAME_SIZE, "%s",
1335 mrvl_xstats_tbl[i].name);
1341 * DPDK callback to get information about the device.
1344 * Pointer to Ethernet device structure (unused).
1346 * Info structure output buffer.
1349 mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
1350 struct rte_eth_dev_info *info)
1352 info->speed_capa = ETH_LINK_SPEED_10M |
1353 ETH_LINK_SPEED_100M |
1357 info->max_rx_queues = MRVL_PP2_RXQ_MAX;
1358 info->max_tx_queues = MRVL_PP2_TXQ_MAX;
1359 info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;
1361 info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
1362 info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
1363 info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;
1365 info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
1366 info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
1367 info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;
1369 info->rx_offload_capa = MRVL_RX_OFFLOADS;
1370 info->rx_queue_offload_capa = MRVL_RX_OFFLOADS;
1372 info->tx_offload_capa = MRVL_TX_OFFLOADS;
1373 info->tx_queue_offload_capa = MRVL_TX_OFFLOADS;
1375 info->flow_type_rss_offloads = ETH_RSS_IPV4 |
1376 ETH_RSS_NONFRAG_IPV4_TCP |
1377 ETH_RSS_NONFRAG_IPV4_UDP;
1379 /* By default packets are dropped if no descriptors are available */
1380 info->default_rxconf.rx_drop_en = 1;
1382 info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
1386 * Return supported packet types.
1389 * Pointer to Ethernet device structure (unused).
1392 * Const pointer to the table with supported packet types.
1394 static const uint32_t *
1395 mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1397 static const uint32_t ptypes[] = {
1399 RTE_PTYPE_L2_ETHER_VLAN,
1400 RTE_PTYPE_L2_ETHER_QINQ,
1402 RTE_PTYPE_L3_IPV4_EXT,
1403 RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
1405 RTE_PTYPE_L3_IPV6_EXT,
1406 RTE_PTYPE_L2_ETHER_ARP,
1415 * DPDK callback to get information about specific receive queue.
1418 * Pointer to Ethernet device structure.
1419 * @param rx_queue_id
1420 * Receive queue index.
1422 * Receive queue information structure.
1424 static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1425 struct rte_eth_rxq_info *qinfo)
1427 struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
1428 struct mrvl_priv *priv = dev->data->dev_private;
1429 int inq = priv->rxq_map[rx_queue_id].inq;
1430 int tc = priv->rxq_map[rx_queue_id].tc;
1431 struct pp2_ppio_tc_params *tc_params =
1432 &priv->ppio_params.inqs_params.tcs_params[tc];
1435 qinfo->nb_desc = tc_params->inqs_params[inq].size;
1439 * DPDK callback to get information about specific transmit queue.
1442 * Pointer to Ethernet device structure.
1443 * @param tx_queue_id
1444 * Transmit queue index.
1446 * Transmit queue information structure.
1448 static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1449 struct rte_eth_txq_info *qinfo)
1451 struct mrvl_priv *priv = dev->data->dev_private;
1452 struct mrvl_txq *txq = dev->data->tx_queues[tx_queue_id];
1455 priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
1456 qinfo->conf.tx_deferred_start = txq->tx_deferred_start;
1460 * DPDK callback to configure a VLAN filter.
1463 * Pointer to Ethernet device structure.
1465 * VLAN ID to filter.
1470 * 0 on success, negative error value otherwise.
1473 mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1475 struct mrvl_priv *priv = dev->data->dev_private;
1483 return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
1484 pp2_ppio_remove_vlan(priv->ppio, vlan_id);
1488 * Release buffers to hardware bpool (buffer-pool)
1491 * Receive queue pointer.
1493 * Number of buffers to release to bpool.
1496 * 0 on success, negative error value otherwise.
1499 mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
1501 struct buff_release_entry entries[MRVL_PP2_RXD_MAX];
1502 struct rte_mbuf *mbufs[MRVL_PP2_RXD_MAX];
1504 unsigned int core_id;
1505 struct pp2_hif *hif;
1506 struct pp2_bpool *bpool;
1508 core_id = rte_lcore_id();
1509 if (core_id == LCORE_ID_ANY)
1512 hif = mrvl_get_hif(rxq->priv, core_id);
1516 bpool = rxq->priv->bpool;
1518 ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
1522 if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
1524 (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
1526 for (i = 0; i < num; i++) {
1527 if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
1528 != cookie_addr_high) {
1530 "mbuf virtual addr high 0x%lx out of range",
1531 (uint64_t)mbufs[i] >> 32);
1535 entries[i].buff.addr =
1536 rte_mbuf_data_iova_default(mbufs[i]);
1537 entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
1538 entries[i].bpool = bpool;
1541 pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
1542 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
1549 for (; i < num; i++)
1550 rte_pktmbuf_free(mbufs[i]);
1556 * DPDK callback to configure the receive queue.
1559 * Pointer to Ethernet device structure.
1563 * Number of descriptors to configure in queue.
1565 * NUMA socket on which memory must be allocated.
1567 * Thresholds parameters.
1569 * Memory pool for buffer allocations.
1572 * 0 on success, negative error value otherwise.
1575 mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1576 unsigned int socket,
1577 const struct rte_eth_rxconf *conf,
1578 struct rte_mempool *mp)
1580 struct mrvl_priv *priv = dev->data->dev_private;
1581 struct mrvl_rxq *rxq;
1582 uint32_t frame_size, buf_size = rte_pktmbuf_data_room_size(mp);
1583 uint32_t max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
1587 offloads = conf->offloads | dev->data->dev_conf.rxmode.offloads;
1589 if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
1591 * Unknown TC mapping - the queue cannot be mapped correctly.
1593 MRVL_LOG(ERR, "Unknown TC mapping for queue %hu eth%hhu",
1594 idx, priv->ppio_id);
1598 frame_size = buf_size - RTE_PKTMBUF_HEADROOM - MRVL_PKT_EFFEC_OFFS;
1599 if (frame_size < max_rx_pkt_len) {
1601 "Mbuf size must be increased to %u bytes to hold up "
1602 "to %u bytes of data.",
1603 buf_size + max_rx_pkt_len - frame_size,
1605 dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
1606 MRVL_LOG(INFO, "Setting max rx pkt len to %u",
1607 dev->data->dev_conf.rxmode.max_rx_pkt_len);
1610 if (dev->data->rx_queues[idx]) {
1611 rte_free(dev->data->rx_queues[idx]);
1612 dev->data->rx_queues[idx] = NULL;
1615 rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
1621 rxq->cksum_enabled = offloads & DEV_RX_OFFLOAD_IPV4_CKSUM;
1622 rxq->queue_id = idx;
1623 rxq->port_id = dev->data->port_id;
1624 mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;
1626 tc = priv->rxq_map[rxq->queue_id].tc,
1627 inq = priv->rxq_map[rxq->queue_id].inq;
1628 priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
1631 ret = mrvl_fill_bpool(rxq, desc);
1637 priv->bpool_init_size += desc;
1639 dev->data->rx_queues[idx] = rxq;
1645 * DPDK callback to release the receive queue.
1648 * Generic receive queue pointer.
1651 mrvl_rx_queue_release(void *rxq)
1653 struct mrvl_rxq *q = rxq;
1654 struct pp2_ppio_tc_params *tc_params;
1655 int i, num, tc, inq;
1656 struct pp2_hif *hif;
1657 unsigned int core_id = rte_lcore_id();
1659 if (core_id == LCORE_ID_ANY)
1665 hif = mrvl_get_hif(q->priv, core_id);
1670 tc = q->priv->rxq_map[q->queue_id].tc;
1671 inq = q->priv->rxq_map[q->queue_id].inq;
1672 tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
1673 num = tc_params->inqs_params[inq].size;
1674 for (i = 0; i < num; i++) {
1675 struct pp2_buff_inf inf;
1678 pp2_bpool_get_buff(hif, q->priv->bpool, &inf);
1679 addr = cookie_addr_high | inf.cookie;
1680 rte_pktmbuf_free((struct rte_mbuf *)addr);
1687 * DPDK callback to configure the transmit queue.
1690 * Pointer to Ethernet device structure.
1692 * Transmit queue index.
1694 * Number of descriptors to configure in the queue.
1696 * NUMA socket on which memory must be allocated.
1698 * Tx queue configuration parameters.
1701 * 0 on success, negative error value otherwise.
1704 mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
1705 unsigned int socket,
1706 const struct rte_eth_txconf *conf)
1708 struct mrvl_priv *priv = dev->data->dev_private;
1709 struct mrvl_txq *txq;
1711 if (dev->data->tx_queues[idx]) {
1712 rte_free(dev->data->tx_queues[idx]);
1713 dev->data->tx_queues[idx] = NULL;
1716 txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
1721 txq->queue_id = idx;
1722 txq->port_id = dev->data->port_id;
1723 txq->tx_deferred_start = conf->tx_deferred_start;
1724 dev->data->tx_queues[idx] = txq;
1726 priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
1732 * DPDK callback to release the transmit queue.
1735 * Generic transmit queue pointer.
1738 mrvl_tx_queue_release(void *txq)
1740 struct mrvl_txq *q = txq;
1749 * DPDK callback to get flow control configuration.
1752 * Pointer to Ethernet device structure.
1754 * Pointer to the flow control configuration.
1757 * 0 on success, negative error value otherwise.
1760 mrvl_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1762 struct mrvl_priv *priv = dev->data->dev_private;
1768 ret = pp2_ppio_get_rx_pause(priv->ppio, &en);
1770 MRVL_LOG(ERR, "Failed to read rx pause state");
1774 fc_conf->mode = en ? RTE_FC_RX_PAUSE : RTE_FC_NONE;
1780 * DPDK callback to set flow control configuration.
1783 * Pointer to Ethernet device structure.
1785 * Pointer to the flow control configuration.
1788 * 0 on success, negative error value otherwise.
1791 mrvl_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
1793 struct mrvl_priv *priv = dev->data->dev_private;
1798 if (fc_conf->high_water ||
1799 fc_conf->low_water ||
1800 fc_conf->pause_time ||
1801 fc_conf->mac_ctrl_frame_fwd ||
1803 MRVL_LOG(ERR, "Flowctrl parameter is not supported");
1808 if (fc_conf->mode == RTE_FC_NONE ||
1809 fc_conf->mode == RTE_FC_RX_PAUSE) {
1812 en = fc_conf->mode == RTE_FC_NONE ? 0 : 1;
1813 ret = pp2_ppio_set_rx_pause(priv->ppio, en);
1816 "Failed to change flowctrl on RX side");
1825 * Update RSS hash configuration
1828 * Pointer to Ethernet device structure.
1830 * Pointer to RSS configuration.
1833 * 0 on success, negative error value otherwise.
1836 mrvl_rss_hash_update(struct rte_eth_dev *dev,
1837 struct rte_eth_rss_conf *rss_conf)
1839 struct mrvl_priv *priv = dev->data->dev_private;
1844 return mrvl_configure_rss(priv, rss_conf);
1848 * DPDK callback to get RSS hash configuration.
1851 * Pointer to Ethernet device structure.
1853 * Pointer to RSS configuration.
1859 mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
1860 struct rte_eth_rss_conf *rss_conf)
1862 struct mrvl_priv *priv = dev->data->dev_private;
1863 enum pp2_ppio_hash_type hash_type =
1864 priv->ppio_params.inqs_params.hash_type;
1866 rss_conf->rss_key = NULL;
1868 if (hash_type == PP2_PPIO_HASH_T_NONE)
1869 rss_conf->rss_hf = 0;
1870 else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
1871 rss_conf->rss_hf = ETH_RSS_IPV4;
1872 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
1873 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
1874 else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
1875 rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;
1881 * DPDK callback to get rte_flow callbacks.
1884 * Pointer to the device structure.
1888 * Flow filter operation.
1890 * Pointer to pass the flow ops.
1893 * 0 on success, negative error value otherwise.
1896 mrvl_eth_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
1897 enum rte_filter_type filter_type,
1898 enum rte_filter_op filter_op, void *arg)
1900 switch (filter_type) {
1901 case RTE_ETH_FILTER_GENERIC:
1902 if (filter_op != RTE_ETH_FILTER_GET)
1904 *(const void **)arg = &mrvl_flow_ops;
1907 MRVL_LOG(WARNING, "Filter type (%d) not supported",
1914 * DPDK callback to get rte_mtr callbacks.
1917 * Pointer to the device structure.
1919 * Pointer to pass the mtr ops.
1925 mrvl_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1927 *(const void **)ops = &mrvl_mtr_ops;
1933 * DPDK callback to get rte_tm callbacks.
1936 * Pointer to the device structure.
1938 * Pointer to pass the tm ops.
1944 mrvl_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
1946 *(const void **)ops = &mrvl_tm_ops;
1951 static const struct eth_dev_ops mrvl_ops = {
1952 .dev_configure = mrvl_dev_configure,
1953 .dev_start = mrvl_dev_start,
1954 .dev_stop = mrvl_dev_stop,
1955 .dev_set_link_up = mrvl_dev_set_link_up,
1956 .dev_set_link_down = mrvl_dev_set_link_down,
1957 .dev_close = mrvl_dev_close,
1958 .link_update = mrvl_link_update,
1959 .promiscuous_enable = mrvl_promiscuous_enable,
1960 .allmulticast_enable = mrvl_allmulticast_enable,
1961 .promiscuous_disable = mrvl_promiscuous_disable,
1962 .allmulticast_disable = mrvl_allmulticast_disable,
1963 .mac_addr_remove = mrvl_mac_addr_remove,
1964 .mac_addr_add = mrvl_mac_addr_add,
1965 .mac_addr_set = mrvl_mac_addr_set,
1966 .mtu_set = mrvl_mtu_set,
1967 .stats_get = mrvl_stats_get,
1968 .stats_reset = mrvl_stats_reset,
1969 .xstats_get = mrvl_xstats_get,
1970 .xstats_reset = mrvl_xstats_reset,
1971 .xstats_get_names = mrvl_xstats_get_names,
1972 .dev_infos_get = mrvl_dev_infos_get,
1973 .dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
1974 .rxq_info_get = mrvl_rxq_info_get,
1975 .txq_info_get = mrvl_txq_info_get,
1976 .vlan_filter_set = mrvl_vlan_filter_set,
1977 .tx_queue_start = mrvl_tx_queue_start,
1978 .tx_queue_stop = mrvl_tx_queue_stop,
1979 .rx_queue_setup = mrvl_rx_queue_setup,
1980 .rx_queue_release = mrvl_rx_queue_release,
1981 .tx_queue_setup = mrvl_tx_queue_setup,
1982 .tx_queue_release = mrvl_tx_queue_release,
1983 .flow_ctrl_get = mrvl_flow_ctrl_get,
1984 .flow_ctrl_set = mrvl_flow_ctrl_set,
1985 .rss_hash_update = mrvl_rss_hash_update,
1986 .rss_hash_conf_get = mrvl_rss_hash_conf_get,
1987 .filter_ctrl = mrvl_eth_filter_ctrl,
1988 .mtr_ops_get = mrvl_mtr_ops_get,
1989 .tm_ops_get = mrvl_tm_ops_get,
1993 * Return packet type information and l3/l4 offsets.
1996 * Pointer to the received packet descriptor.
2003 * Packet type information.
2005 static inline uint64_t
2006 mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
2007 uint8_t *l3_offset, uint8_t *l4_offset)
2009 enum pp2_inq_l3_type l3_type;
2010 enum pp2_inq_l4_type l4_type;
2011 enum pp2_inq_vlan_tag vlan_tag;
2012 uint64_t packet_type;
2014 pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
2015 pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);
2016 pp2_ppio_inq_desc_get_vlan_tag(desc, &vlan_tag);
2018 packet_type = RTE_PTYPE_L2_ETHER;
2021 case PP2_INQ_VLAN_TAG_SINGLE:
2022 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
2024 case PP2_INQ_VLAN_TAG_DOUBLE:
2025 case PP2_INQ_VLAN_TAG_TRIPLE:
2026 packet_type |= RTE_PTYPE_L2_ETHER_QINQ;
2033 case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
2034 packet_type |= RTE_PTYPE_L3_IPV4;
2036 case PP2_INQ_L3_TYPE_IPV4_OK:
2037 packet_type |= RTE_PTYPE_L3_IPV4_EXT;
2039 case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
2040 packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
2042 case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
2043 packet_type |= RTE_PTYPE_L3_IPV6;
2045 case PP2_INQ_L3_TYPE_IPV6_EXT:
2046 packet_type |= RTE_PTYPE_L3_IPV6_EXT;
2048 case PP2_INQ_L3_TYPE_ARP:
2049 packet_type |= RTE_PTYPE_L2_ETHER_ARP;
2051 * In case of ARP, l4_offset is set to a wrong value.
2052 * Set it to a proper one so that later on mbuf->l3_len can be
2053 * calculated by subtracting l3_offset from l4_offset.
2055 *l4_offset = *l3_offset + MRVL_ARP_LENGTH;
2058 MRVL_LOG(DEBUG, "Failed to recognise l3 packet type");
2063 case PP2_INQ_L4_TYPE_TCP:
2064 packet_type |= RTE_PTYPE_L4_TCP;
2066 case PP2_INQ_L4_TYPE_UDP:
2067 packet_type |= RTE_PTYPE_L4_UDP;
2070 MRVL_LOG(DEBUG, "Failed to recognise l4 packet type");
2078 * Get offload information from the received packet descriptor.
2081 * Pointer to the received packet descriptor.
2084 * Mbuf offload flags.
2086 static inline uint64_t
2087 mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
2090 enum pp2_inq_desc_status status;
2092 status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
2093 if (unlikely(status != PP2_DESC_ERR_OK))
2094 flags = PKT_RX_IP_CKSUM_BAD;
2096 flags = PKT_RX_IP_CKSUM_GOOD;
2098 status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
2099 if (unlikely(status != PP2_DESC_ERR_OK))
2100 flags |= PKT_RX_L4_CKSUM_BAD;
2102 flags |= PKT_RX_L4_CKSUM_GOOD;
2108 * DPDK callback for receive.
2111 * Generic pointer to the receive queue.
2113 * Array to store received packets.
2115 * Maximum number of packets in array.
2118 * Number of packets successfully received.
2121 mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
2123 struct mrvl_rxq *q = rxq;
2124 struct pp2_ppio_desc descs[nb_pkts];
2125 struct pp2_bpool *bpool;
2126 int i, ret, rx_done = 0;
2128 struct pp2_hif *hif;
2129 unsigned int core_id = rte_lcore_id();
2131 hif = mrvl_get_hif(q->priv, core_id);
2133 if (unlikely(!q->priv->ppio || !hif))
2136 bpool = q->priv->bpool;
2138 ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
2139 q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
2140 if (unlikely(ret < 0)) {
2141 MRVL_LOG(ERR, "Failed to receive packets");
2144 mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;
2146 for (i = 0; i < nb_pkts; i++) {
2147 struct rte_mbuf *mbuf;
2148 uint8_t l3_offset, l4_offset;
2149 enum pp2_inq_desc_status status;
2152 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2153 struct pp2_ppio_desc *pref_desc;
2156 pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
2157 pref_addr = cookie_addr_high |
2158 pp2_ppio_inq_desc_get_cookie(pref_desc);
2159 rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
2160 rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
2163 addr = cookie_addr_high |
2164 pp2_ppio_inq_desc_get_cookie(&descs[i]);
2165 mbuf = (struct rte_mbuf *)addr;
2166 rte_pktmbuf_reset(mbuf);
2168 /* drop packet in case of mac, overrun or resource error */
2169 status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
2170 if (unlikely(status != PP2_DESC_ERR_OK)) {
2171 struct pp2_buff_inf binf = {
2172 .addr = rte_mbuf_data_iova_default(mbuf),
2173 .cookie = (pp2_cookie_t)(uint64_t)mbuf,
2176 pp2_bpool_put_buff(hif, bpool, &binf);
2177 mrvl_port_bpool_size
2178 [bpool->pp2_id][bpool->id][core_id]++;
2183 mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
2184 mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
2185 mbuf->data_len = mbuf->pkt_len;
2186 mbuf->port = q->port_id;
2188 mrvl_desc_to_packet_type_and_offset(&descs[i],
2191 mbuf->l2_len = l3_offset;
2192 mbuf->l3_len = l4_offset - l3_offset;
2194 if (likely(q->cksum_enabled))
2195 mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);
2197 rx_pkts[rx_done++] = mbuf;
2198 q->bytes_recv += mbuf->pkt_len;
2201 if (rte_spinlock_trylock(&q->priv->lock) == 1) {
2202 num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);
2204 if (unlikely(num <= q->priv->bpool_min_size ||
2205 (!rx_done && num < q->priv->bpool_init_size))) {
2206 ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
2208 MRVL_LOG(ERR, "Failed to fill bpool");
2209 } else if (unlikely(num > q->priv->bpool_max_size)) {
2211 int pkt_to_remove = num - q->priv->bpool_init_size;
2212 struct rte_mbuf *mbuf;
2213 struct pp2_buff_inf buff;
2216 "port-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)",
2217 bpool->pp2_id, q->priv->ppio->port_id,
2218 bpool->id, pkt_to_remove, num,
2219 q->priv->bpool_init_size);
2221 for (i = 0; i < pkt_to_remove; i++) {
2222 ret = pp2_bpool_get_buff(hif, bpool, &buff);
2225 mbuf = (struct rte_mbuf *)
2226 (cookie_addr_high | buff.cookie);
2227 rte_pktmbuf_free(mbuf);
2229 mrvl_port_bpool_size
2230 [bpool->pp2_id][bpool->id][core_id] -= i;
2232 rte_spinlock_unlock(&q->priv->lock);
2239 * Prepare offload information.
2243 * @param packet_type
2244 * Packet type bitfield.
2246 * Pointer to the pp2_outq_l3_type structure.
2248 * Pointer to the pp2_outq_l4_type structure.
2249 * @param gen_l3_cksum
2250 * Will be set to 1 in case l3 checksum is computed.
2252 * Will be set to 1 in case l4 checksum is computed.
2255 * 0 on success, negative error value otherwise.
2258 mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
2259 enum pp2_outq_l3_type *l3_type,
2260 enum pp2_outq_l4_type *l4_type,
2265 * Based on ol_flags, prepare information
2266 * for pp2_ppio_outq_desc_set_proto_info(), which sets up the descriptor
2269 if (ol_flags & PKT_TX_IPV4) {
2270 *l3_type = PP2_OUTQ_L3_TYPE_IPV4;
2271 *gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
2272 } else if (ol_flags & PKT_TX_IPV6) {
2273 *l3_type = PP2_OUTQ_L3_TYPE_IPV6;
2274 /* no checksum for ipv6 header */
2277 /* if something different, stop processing */
2281 ol_flags &= PKT_TX_L4_MASK;
2282 if ((packet_type & RTE_PTYPE_L4_TCP) &&
2283 ol_flags == PKT_TX_TCP_CKSUM) {
2284 *l4_type = PP2_OUTQ_L4_TYPE_TCP;
2286 } else if ((packet_type & RTE_PTYPE_L4_UDP) &&
2287 ol_flags == PKT_TX_UDP_CKSUM) {
2288 *l4_type = PP2_OUTQ_L4_TYPE_UDP;
2291 *l4_type = PP2_OUTQ_L4_TYPE_OTHER;
2292 /* no checksum for other type */
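/*
 * Caller-side sketch (an illustration, not code from this driver): to offload
 * both checksums of a TCP/IPv4 frame an application would mark
 * mbuf->packet_type with RTE_PTYPE_L4_TCP and set
 *   mbuf->ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
 *   mbuf->l2_len  = sizeof(struct ether_hdr);
 *   mbuf->l3_len  = sizeof(struct ipv4_hdr);
 * which this helper translates into PP2_OUTQ_L3_TYPE_IPV4 and
 * PP2_OUTQ_L4_TYPE_TCP with the checksum generation flags enabled.
 */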
2300 * Release already sent buffers to bpool (buffer-pool).
2303 * Pointer to the port structure.
2305 * Pointer to the MUSDK hardware interface.
2307 * Pointer to the shadow queue.
2311 * Force releasing packets.
2314 mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
2315 unsigned int core_id, struct mrvl_shadow_txq *sq,
2318 struct buff_release_entry *entry;
2319 uint16_t nb_done = 0, num = 0, skip_bufs = 0;
2322 pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);
2324 sq->num_to_release += nb_done;
2326 if (likely(!force &&
2327 sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
2330 nb_done = sq->num_to_release;
2331 sq->num_to_release = 0;
2333 for (i = 0; i < nb_done; i++) {
2334 entry = &sq->ent[sq->tail + num];
2335 if (unlikely(!entry->buff.addr)) {
2337 "Shadow memory @%d: cookie(%lx), pa(%lx)!",
2338 sq->tail, (u64)entry->buff.cookie,
2339 (u64)entry->buff.addr);
2344 if (unlikely(!entry->bpool)) {
2345 struct rte_mbuf *mbuf;
2347 mbuf = (struct rte_mbuf *)
2348 (cookie_addr_high | entry->buff.cookie);
2349 rte_pktmbuf_free(mbuf);
2354 mrvl_port_bpool_size
2355 [entry->bpool->pp2_id][entry->bpool->id][core_id]++;
2357 if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
2362 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2364 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
2371 pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
2372 sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
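/*
 * Buffers are returned to the bpool in contiguous chunks of the shadow ring:
 * a chunk is flushed with pp2_bpool_put_buffs() whenever the ring wraps at
 * MRVL_PP2_TX_SHADOWQ_SIZE or a run of entries has to be skipped (see
 * skip_bufs), and the tail always advances under MRVL_PP2_TX_SHADOWQ_MASK.
 */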
2378 * DPDK callback for transmit.
2381 * Generic pointer transmit queue.
2383 * Packets to transmit.
2385 * Number of packets in array.
2388 * Number of packets successfully transmitted.
2391 mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2393 struct mrvl_txq *q = txq;
2394 struct mrvl_shadow_txq *sq;
2395 struct pp2_hif *hif;
2396 struct pp2_ppio_desc descs[nb_pkts];
2397 unsigned int core_id = rte_lcore_id();
2398 int i, ret, bytes_sent = 0;
2399 uint16_t num, sq_free_size;
2402 hif = mrvl_get_hif(q->priv, core_id);
2403 sq = &q->shadow_txqs[core_id];
2405 if (unlikely(!q->priv->ppio || !hif))
2409 mrvl_free_sent_buffers(q->priv->ppio, hif, core_id,
2410 sq, q->queue_id, 0);
2412 sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
2413 if (unlikely(nb_pkts > sq_free_size)) {
2415 "No room in shadow queue for %d packets! %d packets will be sent.",
2416 nb_pkts, sq_free_size);
2417 nb_pkts = sq_free_size;
2420 for (i = 0; i < nb_pkts; i++) {
2421 struct rte_mbuf *mbuf = tx_pkts[i];
2422 int gen_l3_cksum, gen_l4_cksum;
2423 enum pp2_outq_l3_type l3_type;
2424 enum pp2_outq_l4_type l4_type;
2426 if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
2427 struct rte_mbuf *pref_pkt_hdr;
2429 pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
2430 rte_mbuf_prefetch_part1(pref_pkt_hdr);
2431 rte_mbuf_prefetch_part2(pref_pkt_hdr);
2434 sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
2435 sq->ent[sq->head].buff.addr =
2436 rte_mbuf_data_iova_default(mbuf);
2437 sq->ent[sq->head].bpool =
2438 (unlikely(mbuf->port >= RTE_MAX_ETHPORTS ||
2439 mbuf->refcnt > 1)) ? NULL :
2440 mrvl_port_to_bpool_lookup[mbuf->port];
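/*
 * A NULL bpool marks buffers that must not be recycled into a hardware pool -
 * mbufs still owned elsewhere (refcnt > 1) or generated by the application
 * (port outside RTE_MAX_ETHPORTS); mrvl_free_sent_buffers() frees those back
 * to their software mempool instead.
 */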
2441 sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
2444 pp2_ppio_outq_desc_reset(&descs[i]);
2445 pp2_ppio_outq_desc_set_phys_addr(&descs[i],
2446 rte_pktmbuf_iova(mbuf));
2447 pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
2448 pp2_ppio_outq_desc_set_pkt_len(&descs[i],
2449 rte_pktmbuf_pkt_len(mbuf));
2451 bytes_sent += rte_pktmbuf_pkt_len(mbuf);
2453 * in case unsupported ol_flags were passed
2454 * do not update descriptor offload information
2456 ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
2457 &l3_type, &l4_type, &gen_l3_cksum,
2462 pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
2464 mbuf->l2_len + mbuf->l3_len,
2465 gen_l3_cksum, gen_l4_cksum);
2469 pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
2470 /* number of packets that were not sent */
2471 if (unlikely(num > nb_pkts)) {
2472 for (i = nb_pkts; i < num; i++) {
2473 sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
2474 MRVL_PP2_TX_SHADOWQ_MASK;
2475 addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
2477 rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
2479 sq->size -= num - nb_pkts;
2482 q->bytes_sent += bytes_sent;
2488 * Initialize packet processor.
2491 * 0 on success, negative error value otherwise.
2496 struct pp2_init_params init_params;
2498 memset(&init_params, 0, sizeof(init_params));
2499 init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
2500 init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
2501 init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;
2503 return pp2_init(&init_params);
2507 * Deinitialize packet processor.
2510 * 0 on success, negative error value otherwise.
2513 mrvl_deinit_pp2(void)
2519 * Create private device structure.
2522 * Pointer to the port name passed in the initialization parameters.
2525 * Pointer to the newly allocated private device structure.
2527 static struct mrvl_priv *
2528 mrvl_priv_create(const char *dev_name)
2530 struct pp2_bpool_params bpool_params;
2531 char match[MRVL_MATCH_LEN];
2532 struct mrvl_priv *priv;
2535 priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
2539 ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
2540 &priv->pp_id, &priv->ppio_id);
2544 bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
2545 PP2_BPOOL_NUM_POOLS);
2548 priv->bpool_bit = bpool_bit;
2550 snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
2552 memset(&bpool_params, 0, sizeof(bpool_params));
2553 bpool_params.match = match;
2554 bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
2555 ret = pp2_bpool_init(&bpool_params, &priv->bpool);
2557 goto out_clear_bpool_bit;
2559 priv->ppio_params.type = PP2_PPIO_T_NIC;
2560 rte_spinlock_init(&priv->lock);
2563 out_clear_bpool_bit:
2564 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2571 * Create device representing Ethernet port.
2574 * Pointer to the port's name.
2577 * 0 on success, negative error value otherwise.
2580 mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
2582 int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
2583 struct rte_eth_dev *eth_dev;
2584 struct mrvl_priv *priv;
2587 eth_dev = rte_eth_dev_allocate(name);
2591 priv = mrvl_priv_create(name);
2597 eth_dev->data->mac_addrs =
2598 rte_zmalloc("mac_addrs",
2599 ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
2600 if (!eth_dev->data->mac_addrs) {
2601 MRVL_LOG(ERR, "Failed to allocate space for eth addrs");
2606 memset(&req, 0, sizeof(req));
2607 strcpy(req.ifr_name, name);
2608 ret = ioctl(fd, SIOCGIFHWADDR, &req);
2612 memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
2613 req.ifr_addr.sa_data, ETHER_ADDR_LEN);
2615 eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
2616 eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
2617 eth_dev->data->kdrv = RTE_KDRV_NONE;
2618 eth_dev->data->dev_private = priv;
2619 eth_dev->device = &vdev->device;
2620 eth_dev->dev_ops = &mrvl_ops;
2622 rte_eth_dev_probing_finish(eth_dev);
2625 rte_free(eth_dev->data->mac_addrs);
2627 rte_eth_dev_release_port(eth_dev);
2635 * Cleanup previously created device representing Ethernet port.
2638 * Pointer to the port name.
2641 mrvl_eth_dev_destroy(const char *name)
2643 struct rte_eth_dev *eth_dev;
2644 struct mrvl_priv *priv;
2646 eth_dev = rte_eth_dev_allocated(name);
2650 priv = eth_dev->data->dev_private;
2651 pp2_bpool_deinit(priv->bpool);
2652 used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
2654 rte_free(eth_dev->data->mac_addrs);
2655 rte_eth_dev_release_port(eth_dev);
2659 * Callback used by rte_kvargs_process() during argument parsing.
2662 * Pointer to the parsed key (unused).
2664 * Pointer to the parsed value.
2666 * Pointer to the extra arguments which contains address of the
2667 * table of pointers to parsed interface names.
2673 mrvl_get_ifnames(const char *key __rte_unused, const char *value,
2676 struct mrvl_ifnames *ifnames = extra_args;
2678 ifnames->names[ifnames->idx++] = value;
2684 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
2687 mrvl_deinit_hifs(void)
2691 for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++) {
2693 pp2_hif_deinit(hifs[i]);
2695 used_hifs = MRVL_MUSDK_HIFS_RESERVED;
2696 memset(hifs, 0, sizeof(hifs));
2700 * DPDK callback to register the virtual device.
2703 * Pointer to the virtual device.
2706 * 0 on success, negative error value otherwise.
2709 rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
2711 struct rte_kvargs *kvlist;
2712 struct mrvl_ifnames ifnames;
2714 uint32_t i, ifnum, cfgnum;
2717 params = rte_vdev_device_args(vdev);
2721 kvlist = rte_kvargs_parse(params, valid_args);
2725 ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
2726 if (ifnum > RTE_DIM(ifnames.names))
2727 goto out_free_kvlist;
2730 rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
2731 mrvl_get_ifnames, &ifnames);
2735 * The below system initialization should be done only once,
2736 * on the first provided configuration file
2738 if (!mrvl_qos_cfg) {
2739 cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
2740 MRVL_LOG(INFO, "Parsing config file!");
2742 MRVL_LOG(ERR, "Cannot handle more than one config file!");
2743 goto out_free_kvlist;
2744 } else if (cfgnum == 1) {
2745 rte_kvargs_process(kvlist, MRVL_CFG_ARG,
2746 mrvl_get_qoscfg, &mrvl_qos_cfg);
2753 MRVL_LOG(INFO, "Perform MUSDK initializations");
2755 ret = rte_mvep_init(MVEP_MOD_T_PP2, kvlist);
2757 goto out_free_kvlist;
2759 ret = mrvl_init_pp2();
2761 MRVL_LOG(ERR, "Failed to init PP!");
2762 rte_mvep_deinit(MVEP_MOD_T_PP2);
2763 goto out_free_kvlist;
2766 memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));
2767 memset(mrvl_port_to_bpool_lookup, 0, sizeof(mrvl_port_to_bpool_lookup));
2769 mrvl_lcore_first = RTE_MAX_LCORE;
2770 mrvl_lcore_last = 0;
2773 for (i = 0; i < ifnum; i++) {
2774 MRVL_LOG(INFO, "Creating %s", ifnames.names[i]);
2775 ret = mrvl_eth_dev_create(vdev, ifnames.names[i]);
2779 mrvl_dev_num += ifnum;
2781 rte_kvargs_free(kvlist);
2786 mrvl_eth_dev_destroy(ifnames.names[i]);
2788 if (mrvl_dev_num == 0) {
2790 rte_mvep_deinit(MVEP_MOD_T_PP2);
2793 rte_kvargs_free(kvlist);
2799 * DPDK callback to remove virtual device.
2802 * Pointer to the removed virtual device.
2805 * 0 on success, negative error value otherwise.
2808 rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
2813 name = rte_vdev_device_name(vdev);
2817 MRVL_LOG(INFO, "Removing %s", name);
2819 RTE_ETH_FOREACH_DEV(i) { /* FIXME: removing all devices! */
2820 char ifname[RTE_ETH_NAME_MAX_LEN];
2822 rte_eth_dev_get_name_by_port(i, ifname);
2823 mrvl_eth_dev_destroy(ifname);
2827 if (mrvl_dev_num == 0) {
2828 MRVL_LOG(INFO, "Perform MUSDK deinit");
2831 rte_mvep_deinit(MVEP_MOD_T_PP2);
2837 static struct rte_vdev_driver pmd_mrvl_drv = {
2838 .probe = rte_pmd_mrvl_probe,
2839 .remove = rte_pmd_mrvl_remove,
2842 RTE_PMD_REGISTER_VDEV(net_mvpp2, pmd_mrvl_drv);
2843 RTE_PMD_REGISTER_ALIAS(net_mvpp2, eth_mvpp2);
2845 RTE_INIT(mrvl_init_log)
2847 mrvl_logtype = rte_log_register("pmd.net.mvpp2");
2848 if (mrvl_logtype >= 0)
2849 rte_log_set_level(mrvl_logtype, RTE_LOG_NOTICE);