/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Semihalf. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * so declare only one version here.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif

#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>

#include <fcntl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
62 #include "mrvl_ethdev.h"

/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

#define MRVL_MAC_ADDRS_MAX 1

#define MRVL_MUSDK_PREFETCH_SHIFT 2
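
/*
 * Commentary (added): MUSDK objects are looked up by textual "match"
 * strings such as "ppio-0:1", "pool-0:2" or "hif-4" (see the snprintf()
 * calls below); MRVL_MATCH_LEN bounds the buffers those strings are
 * built in.
 */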
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)

static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	MRVL_MUSDK_BPOOLS_RESERVED,
	MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
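
/*
 * Illustrative helper (not part of the original driver): bpool cookies
 * are 32-bit wide while mbuf virtual addresses are 64-bit. The PMD
 * assumes every mbuf shares the same upper 32 address bits, caches them
 * once in cookie_addr_high and ORs them back in when turning a cookie
 * into an mbuf pointer, as the rx/tx paths below do inline.
 */
static inline struct rte_mbuf *
mrvl_cookie_to_mbuf(uint64_t cookie)
{
	return (struct rte_mbuf *)(cookie_addr_high | cookie);
}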

/*
 * To enable buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer bookkeeping.
 *
 * Before a packet is sent, its buffer information (pp2_buff_inf) is
 * stored in the shadow queue. After the packet is transmitted, the
 * no-longer-used buffer is released back to its original hardware pool,
 * provided it originated from an interface. If it was generated by the
 * application itself, i.e. the mbuf->port field is 0xff, it is released
 * to the software mempool instead.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of buffers sent, that can be released */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
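
/*
 * Commentary (added): MRVL_PP2_TX_SHADOWQ_SIZE is a power of two, so
 * head and tail advance with a masked increment, e.g.:
 *
 *	sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
 */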

struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
};

struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
};

/*
 * Every tx queue should have a dedicated shadow tx queue.
 *
 * Ports assigned by DPDK might not start at zero or be contiguous, so
 * as a workaround define shadow queues for each possible port; that way
 * there is always a matching slot.
 */
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];

/** Number of ports configured. */
int mrvl_ports_nb;

static int mrvl_lcore_first;
static int mrvl_lcore_last;

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}

static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	/* index of the lowest free bit above the ones already set */
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
		RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
		RTE_LOG(INFO, PMD,
			"L2 CRC stripping is always enabled in hw\n");
		dev->data->dev_conf.rxmode.hw_strip_crc = 1;
	}

	if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		RTE_LOG(INFO, PMD, "Split headers not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_lro) {
		RTE_LOG(INFO, PMD, "LRO not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 ETHER_HDR_LEN - ETHER_CRC_LEN;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	return 0;
}

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects the hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	/* extra MV_MH_SIZE bytes are required for Marvell tag */
	uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
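	/*
	 * For example (added commentary): the standard 1500-byte MTU gives
	 * MRU = 1500 + 2 (Marvell header, MV_MH_SIZE) + 14 (Ethernet header)
	 *     + 4 (CRC) = 1520 bytes.
	 */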
	int ret;

	if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
		return -EINVAL;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret)
		return ret;

	return pp2_ppio_set_mtu(priv->ppio, mtu);
}

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can be updated if pp2_ppio_enable() was called at least once
	 * as pp2_ppio_enable() changes port->t_mode from default 0 to
	 * PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret)
		pp2_ppio_disable(priv->ppio);

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return ret;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_disable(priv->ppio);
	if (ret)
		return ret;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret;

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;
	/*
	 * Calculate the maximum bpool size for the refill feature as 1.5
	 * times the configured size. If the bpool size exceeds this value,
	 * superfluous buffers will be removed.
	 */
	priv->bpool_max_size = priv->bpool_init_size +
			      (priv->bpool_init_size >> 1);
	/*
	 * Calculate the minimum bpool size for the refill feature as
	 * follows: two default burst sizes multiplied by the number of rx
	 * queues. If the bpool size drops below this value, new buffers
	 * will be added to the pool.
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
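	/*
	 * Illustrative numbers (not from the original source): with
	 * bpool_init_size = 2048 and 4 rx queues, the pool is refilled
	 * whenever it drops to 4 * 64 * 2 = 512 buffers and trimmed back
	 * once it exceeds 2048 + 1024 = 3072 buffers.
	 */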
	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret)
		return ret;

	/* For default QoS config, don't start classifier. */
	if (mrvl_qos_cfg) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			pp2_ppio_deinit(priv->ppio);
			return ret;
		}
	}

	ret = mrvl_dev_set_link_up(dev);
	if (ret)
		goto out;

	return 0;
out:
	pp2_ppio_deinit(priv->ppio);
	return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing rx queues\n");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		int ret, num;

		do {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
	}
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct mrvl_shadow_txq *sq =
			&shadow_txqs[dev->data->port_id][i];

		while (sq->tail != sq->head) {
			uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
			rte_pktmbuf_free((struct rte_mbuf *)addr);
			sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		}

		memset(sq, 0, sizeof(*sq));
	}
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint32_t num;
	int ret;

	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
		return;
	}

	while (num--) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
					 &inf);
		if (ret)
			break;

		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	mrvl_dev_set_link_down(dev);
	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	if (priv->qos_tbl)
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
	pp2_ppio_deinit(priv->ppio);
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	mrvl_flush_bpool(dev);
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO: use the MUSDK API here once it provides the necessary
	 * functionality.
	 */
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
							   ETH_LINK_FIXED;

	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}

/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	/*
	 * TODO: the port stops sending packets if pp2_ppio_set_mac_addr()
	 * is called after pp2_ppio_enable(); as a quick fix, enable the
	 * port once again.
	 */
	pp2_ppio_enable(priv->ppio);
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
static void
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		   struct rte_eth_dev_info *info)
{
	info->speed_capa = ETH_LINK_SPEED_10M |
			   ETH_LINK_SPEED_100M |
			   ETH_LINK_SPEED_1G |
			   ETH_LINK_SPEED_10G;

	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

	info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;

	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}

/**
 * DPDK callback to get information about a specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	qinfo->mp = q->mp;
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about a specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}

/**
 * Release buffers to hardware bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
	struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
	struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
	int i, ret;
	unsigned int core_id = rte_lcore_id();
	struct pp2_hif *hif = hifs[core_id];
	struct pp2_bpool *bpool = rxq->priv->bpool;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (ret)
		return ret;

	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
		cookie_addr_high =
			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

	for (i = 0; i < num; i++) {
		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
			!= cookie_addr_high) {
			RTE_LOG(ERR, PMD,
				"mbuf virtual addr high 0x%lx out of range\n",
				(uint64_t)mbufs[i] >> 32);
			goto out;
		}

		entries[i].buff.addr =
			rte_mbuf_data_dma_addr_default(mbufs[i]);
		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
		entries[i].bpool = bpool;
	}

	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;
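	/*
	 * Commentary (assumption based on the pointer cast above): the
	 * buffer count is passed by address so pp2_bpool_put_buffs() can
	 * report how many buffers were actually released; the bookkeeping
	 * therefore adds the updated value of i, not the requested num.
	 */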
	return 0;
out:
	for (; i < num; i++)
		rte_pktmbuf_free(mbufs[i]);

	return -1;
}

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_rxconf *conf __rte_unused,
		    struct rte_mempool *mp)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq;
	uint32_t min_size,
		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	int ret, tc, inq;

	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
		/*
		 * Unknown TC mapping, mapping will not have a correct queue.
		 */
		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
			idx, priv->ppio_id);
		return -EFAULT;
	}

	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
		   MRVL_PKT_EFFEC_OFFS;
	if (min_size < max_rx_pkt_len) {
		RTE_LOG(ERR, PMD,
			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
			MRVL_PKT_EFFEC_OFFS,
			max_rx_pkt_len);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

	tc = priv->rxq_map[rxq->queue_id].tc;
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
		desc;

	ret = mrvl_fill_bpool(rxq, desc);
	if (ret) {
		rte_free(rxq);
		return ret;
	}

	priv->bpool_init_size += desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;

	if (!q)
		return;

	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}

	rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_txconf *conf __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq;

	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	if (!txq)
		return -ENOMEM;

	txq->priv = priv;
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	dev->data->tx_queues[idx] = txq;

	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

	return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
static void
mrvl_tx_queue_release(void *txq)
{
	struct mrvl_txq *q = txq;

	if (!q)
		return;

	rte_free(q);
}

static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.dev_infos_get = mrvl_dev_infos_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
};

/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	int num;
	unsigned int core_id = rte_lcore_id();

	if (unlikely(!q->priv->ppio))
		return 0;

	bpool = q->priv->bpool;

	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0)) {
		RTE_LOG(ERR, PMD, "Failed to receive packets\n");
		return 0;
	}
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		enum pp2_inq_desc_status status;
		uint64_t addr;
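		/*
		 * Prefetch the mbuf MRVL_MUSDK_PREFETCH_SHIFT descriptors
		 * ahead to hide memory latency while the current one is
		 * being processed.
		 */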
		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			u64 pref_addr;

			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		}

		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);
		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != PP2_DESC_ERR_OK)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_dma_addr_default(mbuf),
				.cookie = (pp2_cookie_t)(uint64_t)mbuf,
			};

			pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
			continue;
		}

		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;

		rx_pkts[rx_done++] = mbuf;
	}
	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
			if (ret)
				RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			int i;
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;

			RTE_LOG(DEBUG, PMD,
				"\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
				bpool->pp2_id, q->priv->ppio->port_id,
				bpool->id, pkt_to_remove, num,
				q->priv->bpool_init_size);

			for (i = 0; i < pkt_to_remove; i++) {
				pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
				mbuf = (struct rte_mbuf *)
				       (cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			}
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -=
								pkt_to_remove;
		}
		rte_spinlock_unlock(&q->priv->lock);
	}

	return rx_done;
}

/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Transmit queue id.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       struct mrvl_shadow_txq *sq, int qid, int force)
{
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	int i, core_id = rte_lcore_id();

	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

	sq->num_to_release += nb_done;

	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
		return;

	nb_done = sq->num_to_release;
	sq->num_to_release = 0;
	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
			RTE_LOG(ERR, PMD,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
			skip_bufs = 1;
			goto skip;
		}

		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;

			mbuf = (struct rte_mbuf *)
			       (cookie_addr_high | entry->buff.cookie);
			rte_pktmbuf_free(mbuf);
			skip_bufs = 1;
			goto skip;
		}

		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
		num++;
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
			goto skip;
		continue;
skip:
		if (likely(num))
			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		num += skip_bufs;
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
		num = 0;
		skip_bufs = 0;
	}

	if (likely(num)) {
		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
	}
}

/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
	struct pp2_hif *hif = hifs[rte_lcore_id()];
	struct pp2_ppio_desc descs[nb_pkts];
	int i;
	uint16_t num, sq_free_size;

	if (unlikely(!q->priv->ppio))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);
	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
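	/*
	 * Commentary (assumption, not from the original source): the extra
	 * -1 appears to keep one shadow queue slot unused, so a completely
	 * full ring never advances head onto tail.
	 */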
	if (unlikely(nb_pkts > sq_free_size)) {
		RTE_LOG(DEBUG, PMD,
			"No room in shadow queue for %d packets! %d packets will be sent.\n",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}
	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
		sq->ent[sq->head].buff.addr =
			rte_mbuf_data_dma_addr_default(mbuf);
		sq->ent[sq->head].bpool =
			(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
			 NULL : mrvl_port_to_bpool_lookup[mbuf->port];
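		/*
		 * The NULL bpool set above marks buffers that must not be
		 * returned to a hardware pool: mbufs generated by the
		 * application (port 0xff) and mbufs still referenced
		 * elsewhere are freed to their software mempool by
		 * mrvl_free_sent_buffers() instead.
		 */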
		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size++;

		pp2_ppio_outq_desc_reset(&descs[i]);
		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
						 rte_pktmbuf_mtophys(mbuf));
		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
					       rte_pktmbuf_pkt_len(mbuf));
	}
	num = nb_pkts;
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/*
	 * pp2_ppio_send() updates nb_pkts in place to the number of packets
	 * actually sent; roll the shadow queue head back over the ones that
	 * were not sent.
	 */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
		}
		sq->size -= num - nb_pkts;
	}

	return nb_pkts;
}

/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;

	return pp2_init(&init_params);
}

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	pp2_deinit();
}

/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	int ret, bpool_bit;

	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	if (!priv)
		return NULL;

	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	if (ret)
		goto out_free_priv;

	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	if (bpool_bit < 0)
		goto out_free_priv;
	priv->bpool_bit = bpool_bit;

	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
		 priv->bpool_bit);
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
	if (ret)
		goto out_clear_bpool_bit;

	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);

	return priv;
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
	rte_free(priv);
	return NULL;
}

/**
 * Create device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	struct ifreq req;

	eth_dev = rte_eth_dev_allocate(name);
	if (!eth_dev)
		return -ENOMEM;

	priv = mrvl_priv_create(name);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_dev;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
		ret = -ENOMEM;
		goto out_free_priv;
	}

	memset(&req, 0, sizeof(req));
	strcpy(req.ifr_name, name);
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	close(fd);
	if (ret)
		goto out_free_mac;

	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, ETHER_ADDR_LEN);

	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	eth_dev->data->dev_private = priv;
	eth_dev->device = &vdev->device;
	eth_dev->dev_ops = &mrvl_ops;

	return 0;
out_free_mac:
	rte_free(eth_dev->data->mac_addrs);
out_free_dev:
	rte_eth_dev_release_port(eth_dev);
out_free_priv:
	rte_free(priv);

	return ret;
}

/**
 * Cleanup previously created device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
static void
mrvl_eth_dev_destroy(const char *name)
{
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;

	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return;

	priv = eth_dev->data->dev_private;
	pp2_bpool_deinit(priv->bpool);
	rte_free(priv);
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
}

/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
		 void *extra_args)
{
	const char **ifnames = extra_args;

	ifnames[mrvl_ports_nb++] = value;

	return 0;
}

/**
 * Initialize per-lcore MUSDK hardware interfaces (hifs).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_hifs(void)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int i, ret;

	RTE_LCORE_FOREACH(i) {
		ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
		if (ret < 0)
			return ret;

		snprintf(match, sizeof(match), "hif-%d", ret);
		memset(&params, 0, sizeof(params));
		params.match = match;
		params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
		ret = pp2_hif_init(&params, &hifs[i]);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
			return ret;
		}
	}

	return 0;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	RTE_LCORE_FOREACH(i) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
}

static void mrvl_set_first_last_cores(int core_id)
{
	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
}

/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum, core_id;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames))
		goto out_free_kvlist;

	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);

	cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
	if (cfgnum > 1) {
		RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
		goto out_free_kvlist;
	} else if (cfgnum == 1) {
		rte_kvargs_process(kvlist, MRVL_CFG_ARG,
				   mrvl_get_qoscfg, &mrvl_qos_cfg);
	}

	/*
	 * ret == -EEXIST is correct, it means DMA
	 * has already been initialized (by another PMD).
	 */
	ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
	if (ret < 0 && ret != -EEXIST)
		goto out_free_kvlist;

	ret = mrvl_init_pp2();
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
		goto out_deinit_dma;
	}

	ret = mrvl_init_hifs();
	if (ret)
		goto out_deinit_hifs;

	for (i = 0; i < ifnum; i++) {
		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames[i]);
		if (ret)
			goto out_cleanup;
	}

	rte_kvargs_free(kvlist);

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));

	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

	RTE_LCORE_FOREACH(core_id) {
		mrvl_set_first_last_cores(core_id);
	}

	return 0;
out_cleanup:
	for (; i > 0; i--)
		mrvl_eth_dev_destroy(ifnames[i]);
out_deinit_hifs:
	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
out_deinit_dma:
	mv_sys_dma_mem_destroy();
out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
	int i;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (!name)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing %s\n", name);

	for (i = 0; i < rte_eth_dev_count(); i++) {
		char ifname[RTE_ETH_NAME_MAX_LEN];

		rte_eth_dev_get_name_by_port(i, ifname);
		mrvl_eth_dev_destroy(ifname);
	}

	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
	mv_sys_dma_mem_destroy();

	return 0;
}

static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
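
/*
 * Usage sketch (illustrative, based on the "iface"/"cfg" kvargs above):
 *
 *   testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/tmp/qos.conf ...
 *
 * Interface names must match PP2-backed kernel netdevs; at most one QoS
 * configuration file may be supplied.
 */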