/*
 * Copyright(c) 2017 Semihalf. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Semihalf nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
/* Unfortunately, container_of is defined by both DPDK and MUSDK,
 * so we declare only one version.
 *
 * Note that it is not used in this PMD anyway.
 */
#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "mrvl_ethdev.h"
/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9

#define MRVL_MAC_ADDRS_MAX 1

#define MRVL_MUSDK_PREFETCH_SHIFT 2

#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
static const char * const valid_args[] = {
        MRVL_IFACE_NAME_ARG,
        MRVL_CFG_ARG,
        NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
        MRVL_MUSDK_BPOOLS_RESERVED,
        MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;
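/*
 * Illustrative sketch, not part of the original driver: the bpool cookie is
 * narrower than a virtual address, so only the low
 * MRVL_COOKIE_HIGH_ADDR_SHIFT bits of an mbuf pointer travel through the
 * hardware; the constant high bits are latched once in cookie_addr_high and
 * ORed back on the way out. Assuming pp2_cookie_t is 32 bits wide, an mbuf
 * at 0x00007f5a12345600 is stored as cookie 0x12345600 while
 * cookie_addr_high holds 0x00007f5a00000000. The helper name below is
 * hypothetical.
 */
static inline struct rte_mbuf *
mrvl_cookie_to_mbuf(uint64_t high_bits, pp2_cookie_t cookie)
{
        /* Reconstruct the full 64-bit pointer from its two halves. */
        return (struct rte_mbuf *)(high_bits | cookie);
}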
/*
 * To use buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 *
 * Before a packet is sent, its buffer information (pp2_buff_inf) is
 * stored in the shadow queue. After the packet has been transmitted, the
 * no-longer-used packet buffer is released back to its original hardware
 * pool, provided it originated from an interface.
 * If it was generated by the application itself, i.e. the mbuf->port
 * field is 0xff, it is released to the software mempool instead.
 */
struct mrvl_shadow_txq {
        int head;           /* write index - used when sending buffers */
        int tail;           /* read index - used when releasing buffers */
        u16 size;           /* queue occupied size */
        u16 num_to_release; /* number of buffers sent that can be released */
        struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
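/*
 * Illustrative note (not in the original sources): the shadow queue is a
 * power-of-two ring, so indices wrap with a bitwise AND. Assuming
 * MRVL_PP2_TX_SHADOWQ_SIZE == 512 (and thus a mask of 0x1FF), advancing
 * head from 511 gives (511 + 1) & 0x1FF == 0, and the free room is computed
 * as MRVL_PP2_TX_SHADOWQ_SIZE - size - 1 so that head and tail remain
 * distinguishable when the ring is full.
 */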
        struct mrvl_priv *priv;
        struct rte_mempool *mp;

        struct mrvl_priv *priv;
/*
 * Every TX queue should have a dedicated shadow TX queue.
 *
 * Ports assigned by DPDK might not start at zero or be contiguous, so
 * as a workaround define shadow queues for each possible port so that
 * we always fit somewhere.
 */
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];
/** Number of ports configured. */
int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;
mrvl_get_bpool_size(int pp2_id, int pool_id)

        for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
                size += mrvl_port_bpool_size[pp2_id][pool_id][i];
mrvl_reserve_bit(int *bitmap, int max)

        int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);
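        /*
         * Illustrative note (not in the original sources): with a 32-bit
         * bitmap, n is the index of the first bit above the highest bit
         * already set. Starting from MRVL_MUSDK_HIFS_RESERVED (0x0F),
         * __builtin_clz(0x0F) == 28, so n == 32 - 28 == 4: bit 4 is
         * reserved next and the bitmap becomes 0x1F. This scheme assumes
         * the reserved bits always form a contiguous run from bit 0.
         */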
/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_dev_configure(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE) {
                RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
                        dev->data->dev_conf.rxmode.mq_mode);
                return -EINVAL;
        }

        if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
                RTE_LOG(INFO, PMD,
                        "L2 CRC stripping is always enabled in hw\n");
                dev->data->dev_conf.rxmode.hw_strip_crc = 1;
        }

        if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
                RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.split_hdr_size) {
                RTE_LOG(INFO, PMD, "Split headers not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.enable_scatter) {
                RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.enable_lro) {
                RTE_LOG(INFO, PMD, "LRO not supported\n");
                return -EINVAL;
        }

        if (dev->data->dev_conf.rxmode.jumbo_frame)
                dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
                                 ETHER_HDR_LEN - ETHER_CRC_LEN;

        ret = mrvl_configure_rxqs(priv, dev->data->port_id,
                                  dev->data->nb_rx_queues);

        priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
        priv->nb_rx_queues = dev->data->nb_rx_queues;
/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        struct mrvl_priv *priv = dev->data->dev_private;
        /* extra MV_MH_SIZE bytes are required for Marvell tag */
        uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;

        if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
                return -EINVAL;

        ret = pp2_ppio_set_mru(priv->ppio, mru);

        return pp2_ppio_set_mtu(priv->ppio, mtu);
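/*
 * Illustrative arithmetic for the MRU computation above (not in the
 * original sources), assuming MV_MH_SIZE == 2 (Marvell header),
 * ETHER_HDR_LEN == 14 and ETHER_CRC_LEN == 4: a standard MTU of 1500
 * yields an MRU of 1500 + 2 + 14 + 4 = 1520 bytes programmed into the
 * hardware via pp2_ppio_set_mru().
 */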
/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        ret = pp2_ppio_enable(priv->ppio);

        /*
         * mtu/mru can be updated only after pp2_ppio_enable() has been
         * called at least once, as pp2_ppio_enable() changes port->t_mode
         * from the default 0 to PP2_TRAFFIC_INGRESS_EGRESS.
         *
         * Set mtu to default DPDK value here.
         */
        ret = mrvl_mtu_set(dev, dev->data->mtu);
        if (ret)
                pp2_ppio_disable(priv->ppio);

        dev->data->dev_link.link_status = ETH_LINK_UP;
/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        ret = pp2_ppio_disable(priv->ppio);

        dev->data->dev_link.link_status = ETH_LINK_DOWN;
/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
mrvl_dev_start(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;
        char match[MRVL_MATCH_LEN];

        snprintf(match, sizeof(match), "ppio-%d:%d",
                 priv->pp_id, priv->ppio_id);
        priv->ppio_params.match = match;

        /*
         * Calculate the maximum bpool size for the refill feature as 1.5
         * of the configured size. If the bpool size exceeds this value,
         * superfluous buffers are removed.
         */
        priv->bpool_max_size = priv->bpool_init_size +
                               (priv->bpool_init_size >> 1);
        /*
         * Calculate the minimum bpool size for the refill feature as
         * two default burst sizes multiplied by the number of RX queues.
         * If the bpool size drops below this value, new buffers are
         * added to the pool.
         */
        priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
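        /*
         * Illustrative example (not in the original sources): with 4 RX
         * queues of 512 descriptors each, bpool_init_size accumulates to
         * 2048, so bpool_max_size = 2048 + 1024 = 3072 buffers, while
         * bpool_min_size = 4 * 64 * 2 = 512 buffers with the default
         * MRVL_BURST_SIZE of 64.
         */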
        ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);

        /* For default QoS config, don't start classifier. */
        ret = mrvl_start_qos_mapping(priv);

        pp2_ppio_deinit(priv->ppio);

        ret = mrvl_dev_set_link_up(dev);

        pp2_ppio_deinit(priv->ppio);
/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
        RTE_LOG(INFO, PMD, "Flushing rx queues\n");
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                struct mrvl_rxq *q = dev->data->rx_queues[i];
                struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

                do {
                        num = MRVL_PP2_RXD_MAX;
                        ret = pp2_ppio_recv(q->priv->ppio,
                                            q->priv->rxq_map[q->queue_id].tc,
                                            q->priv->rxq_map[q->queue_id].inq,
                                            descs, (uint16_t *)&num);
                } while (ret == 0 && num);
        }
/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
        RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
        for (i = 0; i < RTE_MAX_LCORE; i++) {
                struct mrvl_shadow_txq *sq =
                        &shadow_txqs[dev->data->port_id][i];

                while (sq->tail != sq->head) {
                        uint64_t addr = cookie_addr_high |
                                        sq->ent[sq->tail].buff.cookie;
                        rte_pktmbuf_free((struct rte_mbuf *)addr);
                        sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
                }

                memset(sq, 0, sizeof(*sq));
        }
/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
mrvl_flush_bpool(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");

                struct pp2_buff_inf inf;

                ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
                                         &inf);

                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
mrvl_dev_stop(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        mrvl_dev_set_link_down(dev);
        mrvl_flush_rx_queues(dev);
        mrvl_flush_tx_shadow_queues(dev);

        pp2_cls_qos_tbl_deinit(priv->qos_tbl);
        pp2_ppio_deinit(priv->ppio);
/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
mrvl_dev_close(struct rte_eth_dev *dev)
        struct mrvl_priv *priv = dev->data->dev_private;

        for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
                struct pp2_ppio_tc_params *tc_params =
                        &priv->ppio_params.inqs_params.tcs_params[i];

                if (tc_params->inqs_params) {
                        rte_free(tc_params->inqs_params);
                        tc_params->inqs_params = NULL;
                }
        }

        mrvl_flush_bpool(dev);
/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
        /*
         * TODO
         * once MUSDK provides the necessary API, use it here
         */
        struct ethtool_cmd edata;
        struct ifreq req;

        edata.cmd = ETHTOOL_GSET;

        strcpy(req.ifr_name, dev->data->name);
        req.ifr_data = (void *)&edata;

        fd = socket(AF_INET, SOCK_DGRAM, 0);

        ret = ioctl(fd, SIOCETHTOOL, &req);

        switch (ethtool_cmd_speed(&edata)) {
        case SPEED_10:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
                break;
        case SPEED_100:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
                break;
        case SPEED_1000:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
                break;
        case SPEED_10000:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
                break;
        default:
                dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
        }

        dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
                                                         ETH_LINK_HALF_DUPLEX;
        dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
                                                           ETH_LINK_FIXED;
/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
        struct mrvl_priv *priv = dev->data->dev_private;

        pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
        /*
         * The port stops sending packets if pp2_ppio_set_mac_addr()
         * is called after pp2_ppio_enable(). As a quick fix, enable
         * the port once again.
         */
        pp2_ppio_enable(priv->ppio);
/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
                   struct rte_eth_dev_info *info)
        info->speed_capa = ETH_LINK_SPEED_10M |
                           ETH_LINK_SPEED_100M |
                           ETH_LINK_SPEED_1G |
                           ETH_LINK_SPEED_10G;

        info->max_rx_queues = MRVL_PP2_RXQ_MAX;
        info->max_tx_queues = MRVL_PP2_TXQ_MAX;
        info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

        info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
        info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
        info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

        info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
        info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
        info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

        info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME;
        /* By default packets are dropped if no descriptors are available */
        info->default_rxconf.rx_drop_en = 1;

        info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
                              struct rte_eth_rxq_info *qinfo)
        struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
        struct mrvl_priv *priv = dev->data->dev_private;
        int inq = priv->rxq_map[rx_queue_id].inq;
        int tc = priv->rxq_map[rx_queue_id].tc;
        struct pp2_ppio_tc_params *tc_params =
                &priv->ppio_params.inqs_params.tcs_params[tc];

        qinfo->nb_desc = tc_params->inqs_params[inq].size;
/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
                              struct rte_eth_txq_info *qinfo)
        struct mrvl_priv *priv = dev->data->dev_private;

        qinfo->nb_desc =
                priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
/**
 * Release buffers to hardware bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
        struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
        struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
        unsigned int core_id = rte_lcore_id();
        struct pp2_hif *hif = hifs[core_id];
        struct pp2_bpool *bpool = rxq->priv->bpool;

        ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);

        if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
                cookie_addr_high =
                        (uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;
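        /*
         * Note (added for illustration): the high bits of the mbuf virtual
         * address are latched exactly once, from the first buffer ever
         * pushed to a bpool. All subsequent buffers must share those high
         * bits - the check below rejects any mbuf whose address falls
         * outside that window, since its truncated cookie could not be
         * reconstructed into a valid pointer on RX.
         */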
        for (i = 0; i < num; i++) {
                if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
                    != cookie_addr_high) {
                        RTE_LOG(ERR, PMD,
                                "mbuf virtual addr high 0x%lx out of range\n",
                                (uint64_t)mbufs[i] >> 32);
                }

                entries[i].buff.addr =
                        rte_mbuf_data_dma_addr_default(mbufs[i]);
                entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
                entries[i].bpool = bpool;
        }

        pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
        mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

        rte_pktmbuf_free(mbufs[i]);
/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Receive queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_rxconf *conf __rte_unused,
                    struct rte_mempool *mp)
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_rxq *rxq;

        max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;

        if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
                /*
                 * Unknown TC mapping, mapping will not have a correct queue.
                 */
                RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
                        idx, priv->ppio_id);

        min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
                   MRVL_PKT_EFFEC_OFFS;
        if (min_size < max_rx_pkt_len) {
                RTE_LOG(ERR, PMD,
                        "Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
                        max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
                        MRVL_PKT_EFFEC_OFFS,
                        max_rx_pkt_len);

        if (dev->data->rx_queues[idx]) {
                rte_free(dev->data->rx_queues[idx]);
                dev->data->rx_queues[idx] = NULL;
        }

        rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);

        rxq->port_id = dev->data->port_id;
        mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

        tc = priv->rxq_map[rxq->queue_id].tc;
        inq = priv->rxq_map[rxq->queue_id].inq;
        priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
                desc;

        ret = mrvl_fill_bpool(rxq, desc);

        priv->bpool_init_size += desc;

        dev->data->rx_queues[idx] = rxq;
/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
mrvl_rx_queue_release(void *rxq)
        struct mrvl_rxq *q = rxq;
        struct pp2_ppio_tc_params *tc_params;

        tc = q->priv->rxq_map[q->queue_id].tc;
        inq = q->priv->rxq_map[q->queue_id].inq;
        tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
        num = tc_params->inqs_params[inq].size;
        for (i = 0; i < num; i++) {
                struct pp2_buff_inf inf;

                pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
                addr = cookie_addr_high | inf.cookie;
                rte_pktmbuf_free((struct rte_mbuf *)addr);
        }
/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
                    unsigned int socket,
                    const struct rte_eth_txconf *conf __rte_unused)
        struct mrvl_priv *priv = dev->data->dev_private;
        struct mrvl_txq *txq;

        if (dev->data->tx_queues[idx]) {
                rte_free(dev->data->tx_queues[idx]);
                dev->data->tx_queues[idx] = NULL;
        }

        txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);

        txq->port_id = dev->data->port_id;
        dev->data->tx_queues[idx] = txq;

        priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
        priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;
/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
mrvl_tx_queue_release(void *txq)
        struct mrvl_txq *q = txq;
static const struct eth_dev_ops mrvl_ops = {
        .dev_configure = mrvl_dev_configure,
        .dev_start = mrvl_dev_start,
        .dev_stop = mrvl_dev_stop,
        .dev_set_link_up = mrvl_dev_set_link_up,
        .dev_set_link_down = mrvl_dev_set_link_down,
        .dev_close = mrvl_dev_close,
        .link_update = mrvl_link_update,
        .mac_addr_set = mrvl_mac_addr_set,
        .mtu_set = mrvl_mtu_set,
        .dev_infos_get = mrvl_dev_infos_get,
        .rxq_info_get = mrvl_rxq_info_get,
        .txq_info_get = mrvl_txq_info_get,
        .rx_queue_setup = mrvl_rx_queue_setup,
        .rx_queue_release = mrvl_rx_queue_release,
        .tx_queue_setup = mrvl_tx_queue_setup,
        .tx_queue_release = mrvl_tx_queue_release,
};
/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct mrvl_rxq *q = rxq;
        struct pp2_ppio_desc descs[nb_pkts];
        struct pp2_bpool *bpool;
        int i, ret, rx_done = 0;
        unsigned int core_id = rte_lcore_id();

        if (unlikely(!q->priv->ppio))
                return 0;

        bpool = q->priv->bpool;

        ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
                            q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
        if (unlikely(ret < 0)) {
                RTE_LOG(ERR, PMD, "Failed to receive packets\n");
                return 0;
        }
        mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *mbuf;
                enum pp2_inq_desc_status status;

                if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
                        struct pp2_ppio_desc *pref_desc;
                        u64 pref_addr;

                        pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
                        pref_addr = cookie_addr_high |
                                    pp2_ppio_inq_desc_get_cookie(pref_desc);
                        rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
                        rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
                }
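                /*
                 * Note (added for illustration): while descriptor i is
                 * being processed, the mbuf belonging to descriptor
                 * i + MRVL_MUSDK_PREFETCH_SHIFT (i + 2 here) is prefetched
                 * into the cache, hiding the memory latency of the mbuf
                 * header access two iterations ahead.
                 */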
                addr = cookie_addr_high |
                       pp2_ppio_inq_desc_get_cookie(&descs[i]);
                mbuf = (struct rte_mbuf *)addr;
                rte_pktmbuf_reset(mbuf);

                /* drop packet in case of MAC, overrun or resource error */
                status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
                if (unlikely(status != PP2_DESC_ERR_OK)) {
                        struct pp2_buff_inf binf = {
                                .addr = rte_mbuf_data_dma_addr_default(mbuf),
                                .cookie = (pp2_cookie_t)(uint64_t)mbuf,
                        };

                        pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
                        mrvl_port_bpool_size
                                [bpool->pp2_id][bpool->id][core_id]++;
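                        /*
                         * Note (added for illustration): instead of being
                         * handed to the application, an errored buffer is
                         * returned straight to its hardware pool and the
                         * per-core bookkeeping counter is bumped, so the
                         * refill logic below still sees a consistent pool
                         * size.
                         */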
                mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
                mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
                mbuf->data_len = mbuf->pkt_len;
                mbuf->port = q->port_id;

                rx_pkts[rx_done++] = mbuf;
        }

        if (rte_spinlock_trylock(&q->priv->lock) == 1) {
                num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

                if (unlikely(num <= q->priv->bpool_min_size ||
                             (!rx_done && num < q->priv->bpool_init_size))) {
                        ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
                        if (ret)
                                RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
                } else if (unlikely(num > q->priv->bpool_max_size)) {
                        int pkt_to_remove = num - q->priv->bpool_init_size;
                        struct rte_mbuf *mbuf;
                        struct pp2_buff_inf buff;

                        RTE_LOG(DEBUG, PMD,
                                "\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
                                bpool->pp2_id, q->priv->ppio->port_id,
                                bpool->id, pkt_to_remove, num,
                                q->priv->bpool_init_size);

                        for (i = 0; i < pkt_to_remove; i++) {
                                pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
                                mbuf = (struct rte_mbuf *)
                                       (cookie_addr_high | buff.cookie);
                                rte_pktmbuf_free(mbuf);
                        }
                        mrvl_port_bpool_size
                                [bpool->pp2_id][bpool->id][core_id] -=
                                                        pkt_to_remove;
                }
                rte_spinlock_unlock(&q->priv->lock);
        }

        return rx_done;
/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 * @param force
 *   Force releasing packets.
 */
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
                       struct mrvl_shadow_txq *sq, int qid, int force)
        struct buff_release_entry *entry;
        uint16_t nb_done = 0, num = 0, skip_bufs = 0;
        int i, core_id = rte_lcore_id();

        pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

        sq->num_to_release += nb_done;

        if (likely(!force &&
                   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
                return;

        nb_done = sq->num_to_release;
        sq->num_to_release = 0;

        for (i = 0; i < nb_done; i++) {
                entry = &sq->ent[sq->tail + num];
                if (unlikely(!entry->buff.addr)) {
                        RTE_LOG(ERR, PMD,
                                "Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
                                sq->tail, (u64)entry->buff.cookie,
                                (u64)entry->buff.addr);

                if (unlikely(!entry->bpool)) {
                        struct rte_mbuf *mbuf;

                        mbuf = (struct rte_mbuf *)
                               (cookie_addr_high | entry->buff.cookie);
                        rte_pktmbuf_free(mbuf);

                mrvl_port_bpool_size
                        [entry->bpool->pp2_id][entry->bpool->id][core_id]++;

                if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))

                        pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);

                sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;

                pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
                sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
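/*
 * Note (added for illustration): pp2_bpool_put_buffs() takes a linear run
 * of entries, so when the shadow ring wraps past MRVL_PP2_TX_SHADOWQ_SIZE
 * the pending buffers are returned in two contiguous chunks - one ending
 * at the top of the ring and one starting at index 0 - with sq->tail
 * advanced (modulo the ring size) after each chunk.
 */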
/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer to the transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct mrvl_txq *q = txq;
        struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
        struct pp2_hif *hif = hifs[rte_lcore_id()];
        struct pp2_ppio_desc descs[nb_pkts];
        uint16_t num, sq_free_size;

        if (unlikely(!q->priv->ppio))
                return 0;

        mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);

        sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
        if (unlikely(nb_pkts > sq_free_size)) {
                RTE_LOG(DEBUG, PMD,
                        "No room in shadow queue for %d packets! %d packets will be sent.\n",
                        nb_pkts, sq_free_size);
                nb_pkts = sq_free_size;
        }

        for (i = 0; i < nb_pkts; i++) {
                struct rte_mbuf *mbuf = tx_pkts[i];

                if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
                        struct rte_mbuf *pref_pkt_hdr;

                        pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
                        rte_mbuf_prefetch_part1(pref_pkt_hdr);
                        rte_mbuf_prefetch_part2(pref_pkt_hdr);
                }

                sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
                sq->ent[sq->head].buff.addr =
                        rte_mbuf_data_dma_addr_default(mbuf);
                sq->ent[sq->head].bpool =
                        (unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
                         NULL : mrvl_port_to_bpool_lookup[mbuf->port];
                sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;

                pp2_ppio_outq_desc_reset(&descs[i]);
                pp2_ppio_outq_desc_set_phys_addr(&descs[i],
                                                 rte_pktmbuf_mtophys(mbuf));
                pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
                pp2_ppio_outq_desc_set_pkt_len(&descs[i],
                                               rte_pktmbuf_pkt_len(mbuf));
        }

        num = nb_pkts;
        pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
        /* number of packets that were not sent */
        if (unlikely(num > nb_pkts)) {
                for (i = nb_pkts; i < num; i++) {
                        sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
                                   MRVL_PP2_TX_SHADOWQ_MASK;
                }
                sq->size -= num - nb_pkts;
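                /*
                 * Illustrative example (not in the original sources): if
                 * num == 32 packets were queued in the shadow ring but
                 * pp2_ppio_send() reports that only nb_pkts == 30 made it
                 * to hardware, the loop above walks head back by 2 entries
                 * (modulo the ring size) and the occupancy counter shrinks
                 * by 2, so the unsent mbufs are not later released as if
                 * they had been transmitted.
                 */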
/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_init_pp2(void)
        struct pp2_init_params init_params;

        memset(&init_params, 0, sizeof(init_params));
        init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
        init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;

        return pp2_init(&init_params);
/**
 * Deinitialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_deinit_pp2(void)
/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
        struct pp2_bpool_params bpool_params;
        char match[MRVL_MATCH_LEN];
        struct mrvl_priv *priv;

        priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());

        ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
                                       &priv->pp_id, &priv->ppio_id);

        bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
                                     PP2_BPOOL_NUM_POOLS);

        priv->bpool_bit = bpool_bit;

        snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
                 priv->bpool_bit);
        memset(&bpool_params, 0, sizeof(bpool_params));
        bpool_params.match = match;
        bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
        ret = pp2_bpool_init(&bpool_params, &priv->bpool);
        if (ret)
                goto out_clear_bpool_bit;

        priv->ppio_params.type = PP2_PPIO_T_NIC;
        rte_spinlock_init(&priv->lock);

        return priv;
out_clear_bpool_bit:
        used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
/**
 * Create device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
        int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct rte_eth_dev *eth_dev;
        struct mrvl_priv *priv;
        struct ifreq req;

        eth_dev = rte_eth_dev_allocate(name);

        priv = mrvl_priv_create(name);

        eth_dev->data->mac_addrs =
                rte_zmalloc("mac_addrs",
                            ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
        if (!eth_dev->data->mac_addrs) {
                RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");

        memset(&req, 0, sizeof(req));
        strcpy(req.ifr_name, name);
        ret = ioctl(fd, SIOCGIFHWADDR, &req);

        memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
               req.ifr_addr.sa_data, ETHER_ADDR_LEN);

        eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
        eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
        eth_dev->data->dev_private = priv;
        eth_dev->device = &vdev->device;
        eth_dev->dev_ops = &mrvl_ops;

        rte_free(eth_dev->data->mac_addrs);

        rte_eth_dev_release_port(eth_dev);
/**
 * Clean up a previously created device representing an Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
mrvl_eth_dev_destroy(const char *name)
        struct rte_eth_dev *eth_dev;
        struct mrvl_priv *priv;

        eth_dev = rte_eth_dev_allocated(name);

        priv = eth_dev->data->dev_private;
        pp2_bpool_deinit(priv->bpool);

        rte_free(eth_dev->data->mac_addrs);
        rte_eth_dev_release_port(eth_dev);
/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 */
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
                 void *extra_args)
        const char **ifnames = extra_args;

        ifnames[mrvl_ports_nb++] = value;
/**
 * Initialize per-lcore MUSDK hardware interfaces (hifs).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
mrvl_init_hifs(void)
        struct pp2_hif_params params;
        char match[MRVL_MATCH_LEN];

        RTE_LCORE_FOREACH(i) {
                ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);

                snprintf(match, sizeof(match), "hif-%d", ret);
                memset(&params, 0, sizeof(params));
                params.match = match;
                params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
                ret = pp2_hif_init(&params, &hifs[i]);
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
mrvl_deinit_hifs(void)
        RTE_LCORE_FOREACH(i) {
                pp2_hif_deinit(hifs[i]);
static void mrvl_set_first_last_cores(int core_id)
{
        if (core_id < mrvl_lcore_first)
                mrvl_lcore_first = core_id;

        if (core_id > mrvl_lcore_last)
                mrvl_lcore_last = core_id;
}
/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
        struct rte_kvargs *kvlist;
        const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
        uint32_t i, ifnum, cfgnum, core_id;

        params = rte_vdev_device_args(vdev);

        kvlist = rte_kvargs_parse(params, valid_args);

        ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
        if (ifnum > RTE_DIM(ifnames))
                goto out_free_kvlist;

        rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
                           mrvl_get_ifnames, &ifnames);

        cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
        if (cfgnum > 1) {
                RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
                goto out_free_kvlist;
        } else if (cfgnum == 1) {
                rte_kvargs_process(kvlist, MRVL_CFG_ARG,
                                   mrvl_get_qoscfg, &mrvl_qos_cfg);
        }

        /*
         * ret == -EEXIST is correct; it means DMA has already been
         * initialized (by another PMD).
         */
        ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
        if (ret < 0 && ret != -EEXIST)
                goto out_free_kvlist;

        ret = mrvl_init_pp2();
        if (ret) {
                RTE_LOG(ERR, PMD, "Failed to init PP!\n");
                goto out_deinit_dma;
        }

        ret = mrvl_init_hifs();
        if (ret)
                goto out_deinit_hifs;

        for (i = 0; i < ifnum; i++) {
                RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
                ret = mrvl_eth_dev_create(vdev, ifnames[i]);

        rte_kvargs_free(kvlist);

        memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));

        mrvl_lcore_first = RTE_MAX_LCORE;
        mrvl_lcore_last = 0;

        RTE_LCORE_FOREACH(core_id) {
                mrvl_set_first_last_cores(core_id);
        }

        mrvl_eth_dev_destroy(ifnames[i]);

        mv_sys_dma_mem_destroy();

        rte_kvargs_free(kvlist);
/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
        name = rte_vdev_device_name(vdev);

        RTE_LOG(INFO, PMD, "Removing %s\n", name);

        for (i = 0; i < rte_eth_dev_count(); i++) {
                char ifname[RTE_ETH_NAME_MAX_LEN];

                rte_eth_dev_get_name_by_port(i, ifname);
                mrvl_eth_dev_destroy(ifname);
        }

        mv_sys_dma_mem_destroy();

        return 0;
static struct rte_vdev_driver pmd_mrvl_drv = {
        .probe = rte_pmd_mrvl_probe,
        .remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);