/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Semihalf. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Semihalf nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_ethdev.h>
#include <rte_kvargs.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_bus_vdev.h>

/* Unluckily, container_of is defined by both DPDK and MUSDK,
 * we'll declare only one version.
 *
 * Note that it is not used in this PMD anyway.
 */
#ifdef container_of
#undef container_of
#endif

#include <drivers/mv_pp2.h>
#include <drivers/mv_pp2_bpool.h>
#include <drivers/mv_pp2_hif.h>

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "mrvl_ethdev.h"
#include "mrvl_qos.h"
/* bitmask with reserved hifs */
#define MRVL_MUSDK_HIFS_RESERVED 0x0F
/* bitmask with reserved bpools */
#define MRVL_MUSDK_BPOOLS_RESERVED 0x07
/* bitmask with reserved kernel RSS tables */
#define MRVL_MUSDK_RSS_RESERVED 0x01
/* maximum number of available hifs */
#define MRVL_MUSDK_HIFS_MAX 9
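
/*
 * Note (summary of the reservation scheme above): the low bits of each
 * bitmask mark resources kept by the kernel driver, so with 0x0F hifs 0-3
 * are reserved and, given MRVL_MUSDK_HIFS_MAX of 9, hifs 4-8 remain for
 * this PMD; likewise bpools 0-2 and kernel RSS table 0 stay reserved.
 */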

/* prefetch shift */
#define MRVL_MUSDK_PREFETCH_SHIFT 2

/* TCAM has 25 entries reserved for uc/mc filter entries */
#define MRVL_MAC_ADDRS_MAX 25
#define MRVL_MATCH_LEN 16
#define MRVL_PKT_EFFEC_OFFS (MRVL_PKT_OFFS + MV_MH_SIZE)
/* Maximum allowable packet size */
#define MRVL_PKT_SIZE_MAX (10240 - MV_MH_SIZE)

#define MRVL_IFACE_NAME_ARG "iface"
#define MRVL_CFG_ARG "cfg"

#define MRVL_BURST_SIZE 64

#define MRVL_ARP_LENGTH 28

#define MRVL_COOKIE_ADDR_INVALID ~0ULL

#define MRVL_COOKIE_HIGH_ADDR_SHIFT (sizeof(pp2_cookie_t) * 8)
#define MRVL_COOKIE_HIGH_ADDR_MASK (~0ULL << MRVL_COOKIE_HIGH_ADDR_SHIFT)
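
/*
 * MUSDK cookies are only sizeof(pp2_cookie_t) * 8 (i.e. 32) bits wide, so
 * an mbuf virtual address cannot be stored in one whole. The PMD keeps the
 * common upper address bits once in cookie_addr_high (captured on the
 * first bpool fill, see mrvl_fill_bpool()) and ORs them back with the
 * cookie to reconstruct the mbuf pointer on receive and buffer release.
 */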

static const char * const valid_args[] = {
	MRVL_IFACE_NAME_ARG,
	MRVL_CFG_ARG,
	NULL
};

static int used_hifs = MRVL_MUSDK_HIFS_RESERVED;
static struct pp2_hif *hifs[RTE_MAX_LCORE];
static int used_bpools[PP2_NUM_PKT_PROC] = {
	MRVL_MUSDK_BPOOLS_RESERVED,
	MRVL_MUSDK_BPOOLS_RESERVED
};

struct pp2_bpool *mrvl_port_to_bpool_lookup[RTE_MAX_ETHPORTS];
int mrvl_port_bpool_size[PP2_NUM_PKT_PROC][PP2_BPOOL_NUM_POOLS][RTE_MAX_LCORE];
uint64_t cookie_addr_high = MRVL_COOKIE_ADDR_INVALID;

/*
 * To support buffer harvesting based on the loopback port, a shadow queue
 * structure was introduced for buffer information bookkeeping.
 *
 * Before a packet is sent, the related buffer information (pp2_buff_inf)
 * is stored in the shadow queue. After the packet is transmitted, the
 * no-longer-used packet buffer is released back to its original hardware
 * pool, provided it originated from an interface.
 * If it was generated by the application itself, i.e. the mbuf->port field
 * is 0xff, it is released to the software mempool instead.
 */
struct mrvl_shadow_txq {
	int head;           /* write index - used when sending buffers */
	int tail;           /* read index - used when releasing buffers */
	u16 size;           /* queue occupied size */
	u16 num_to_release; /* number of buffers sent, that can be released */
	struct buff_release_entry ent[MRVL_PP2_TX_SHADOWQ_SIZE]; /* q entries */
};
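
/*
 * Note: head and tail are advanced with "& MRVL_PP2_TX_SHADOWQ_MASK"
 * throughout this file, which implicitly assumes MRVL_PP2_TX_SHADOWQ_SIZE
 * is a power of two (the ring wraps by masking rather than by modulo).
 */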

struct mrvl_rxq {
	struct mrvl_priv *priv;
	struct rte_mempool *mp;
	int queue_id;
	int port_id;
	int cksum_enabled;
	uint64_t bytes_recv;
	uint64_t drop_mac;
};

struct mrvl_txq {
	struct mrvl_priv *priv;
	int queue_id;
	int port_id;
	uint64_t bytes_sent;
};

/*
 * Every tx queue should have a dedicated shadow tx queue.
 *
 * Ports assigned by DPDK might not start at zero or be contiguous, so
 * as a workaround define shadow queues for each possible port so that
 * we eventually fit somewhere.
 */
struct mrvl_shadow_txq shadow_txqs[RTE_MAX_ETHPORTS][RTE_MAX_LCORE];

/** Number of ports configured. */
int mrvl_ports_nb;
static int mrvl_lcore_first;
static int mrvl_lcore_last;

static inline int
mrvl_get_bpool_size(int pp2_id, int pool_id)
{
	int i;
	int size = 0;

	for (i = mrvl_lcore_first; i <= mrvl_lcore_last; i++)
		size += mrvl_port_bpool_size[pp2_id][pool_id][i];

	return size;
}

static inline int
mrvl_reserve_bit(int *bitmap, int max)
{
	int n = sizeof(*bitmap) * 8 - __builtin_clz(*bitmap);

	if (n >= max)
		return -1;

	*bitmap |= 1 << n;

	return n;
}
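
/*
 * Note on mrvl_reserve_bit(): __builtin_clz() counts leading zeros, so n
 * is the first free bit position above the highest bit already set; e.g.
 * with *bitmap == 0x07 this yields n == 3. The scheme assumes reserved
 * bits always form a contiguous mask starting at bit 0, which holds for
 * the MRVL_MUSDK_*_RESERVED masks used as initial values.
 */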

/**
 * Configure RSS based on the DPDK RSS configuration.
 *
 * @param priv
 *   Pointer to private structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_configure_rss(struct mrvl_priv *priv, struct rte_eth_rss_conf *rss_conf)
{
	if (rss_conf->rss_key)
		RTE_LOG(WARNING, PMD, "Changing hash key is not supported\n");

	if (rss_conf->rss_hf == 0) {
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;
	} else if (rss_conf->rss_hf & ETH_RSS_IPV4) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_2_TUPLE;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 1;
	} else if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) {
		priv->ppio_params.inqs_params.hash_type =
			PP2_PPIO_HASH_T_5_TUPLE;
		priv->rss_hf_tcp = 0;
	} else {
		return -EINVAL;
	}

	return 0;
}

/**
 * Ethernet device configuration.
 *
 * Prepare the driver for a given number of TX and RX queues and
 * configure RSS.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_configure(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	if (dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_NONE &&
	    dev->data->dev_conf.rxmode.mq_mode != ETH_MQ_RX_RSS) {
		RTE_LOG(INFO, PMD, "Unsupported rx multi queue mode %d\n",
			dev->data->dev_conf.rxmode.mq_mode);
		return -EINVAL;
	}

	if (!dev->data->dev_conf.rxmode.hw_strip_crc) {
		RTE_LOG(INFO, PMD,
			"L2 CRC stripping is always enabled in hw\n");
		dev->data->dev_conf.rxmode.hw_strip_crc = 1;
	}

	if (dev->data->dev_conf.rxmode.hw_vlan_strip) {
		RTE_LOG(INFO, PMD, "VLAN stripping not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.split_hdr_size) {
		RTE_LOG(INFO, PMD, "Split headers not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_scatter) {
		RTE_LOG(INFO, PMD, "RX Scatter/Gather not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.enable_lro) {
		RTE_LOG(INFO, PMD, "LRO not supported\n");
		return -EINVAL;
	}

	if (dev->data->dev_conf.rxmode.jumbo_frame)
		dev->data->mtu = dev->data->dev_conf.rxmode.max_rx_pkt_len -
				 ETHER_HDR_LEN - ETHER_CRC_LEN;

	ret = mrvl_configure_rxqs(priv, dev->data->port_id,
				  dev->data->nb_rx_queues);
	if (ret < 0)
		return ret;

	priv->ppio_params.outqs_params.num_outqs = dev->data->nb_tx_queues;
	priv->ppio_params.maintain_stats = 1;
	priv->nb_rx_queues = dev->data->nb_rx_queues;

	if (dev->data->nb_rx_queues == 1 &&
	    dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		RTE_LOG(WARNING, PMD, "Disabling hash for 1 rx queue\n");
		priv->ppio_params.inqs_params.hash_type = PP2_PPIO_HASH_T_NONE;

		return 0;
	}

	return mrvl_configure_rss(priv,
				  &dev->data->dev_conf.rx_adv_conf.rss_conf);
}

/**
 * DPDK callback to change the MTU.
 *
 * Setting the MTU affects hardware MRU (packets larger than the MRU
 * will be dropped).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mtu
 *   New MTU.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	/* extra MV_MH_SIZE bytes are required for Marvell tag */
	uint16_t mru = mtu + MV_MH_SIZE + ETHER_HDR_LEN + ETHER_CRC_LEN;
	int ret;

	if (mtu < ETHER_MIN_MTU || mru > MRVL_PKT_SIZE_MAX)
		return -EINVAL;

	ret = pp2_ppio_set_mru(priv->ppio, mru);
	if (ret)
		return ret;

	return pp2_ppio_set_mtu(priv->ppio, mtu);
}

/**
 * DPDK callback to bring the link up.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_up(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_enable(priv->ppio);
	if (ret)
		return ret;

	/*
	 * mtu/mru can only be updated after pp2_ppio_enable() has been
	 * called at least once, as pp2_ppio_enable() changes port->t_mode
	 * from its default 0 to PP2_TRAFFIC_INGRESS_EGRESS.
	 *
	 * Set mtu to default DPDK value here.
	 */
	ret = mrvl_mtu_set(dev, dev->data->mtu);
	if (ret)
		pp2_ppio_disable(priv->ppio);

	dev->data->dev_link.link_status = ETH_LINK_UP;

	return ret;
}

/**
 * DPDK callback to bring the link down.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_dev_set_link_down(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_disable(priv->ppio);
	if (ret)
		return ret;

	dev->data->dev_link.link_status = ETH_LINK_DOWN;

	return ret;
}

/**
 * DPDK callback to start the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
static int
mrvl_dev_start(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char match[MRVL_MATCH_LEN];
	int ret;

	snprintf(match, sizeof(match), "ppio-%d:%d",
		 priv->pp_id, priv->ppio_id);
	priv->ppio_params.match = match;

	/*
	 * Calculate the maximum bpool size for the refill feature as 1.5 of
	 * the configured size. If the bpool size exceeds this value,
	 * superfluous buffers will be removed.
	 */
	priv->bpool_max_size = priv->bpool_init_size +
			      (priv->bpool_init_size >> 1);
	/*
	 * Calculate the minimum bpool size for the refill feature as
	 * follows: two default burst sizes multiplied by the number of rx
	 * queues. If the bpool size drops below this value, new buffers
	 * will be added to the pool.
	 */
	priv->bpool_min_size = priv->nb_rx_queues * MRVL_BURST_SIZE * 2;
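	/*
	 * Illustrative numbers (an assumed configuration, not taken from the
	 * code): with 4 rx queues of 512 descriptors each, bpool_init_size
	 * becomes 2048 after queue setup, so min = 4 * 64 * 2 = 512 and
	 * max = 2048 + 1024 = 3072 buffers; mrvl_rx_pkt_burst() refills
	 * below min and trims above max.
	 */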

	ret = pp2_ppio_init(&priv->ppio_params, &priv->ppio);
	if (ret)
		return ret;

	/*
	 * In case there are some stale uc/mc mac addresses flush them
	 * here. It cannot be done during mrvl_dev_close() as port information
	 * is already gone at that point (due to pp2_ppio_deinit() in
	 * mrvl_dev_stop()).
	 */
	if (!priv->uc_mc_flushed) {
		ret = pp2_ppio_flush_mac_addrs(priv->ppio, 1, 1);
		if (ret) {
			RTE_LOG(ERR, PMD,
				"Failed to flush uc/mc filter list\n");
			goto out;
		}
		priv->uc_mc_flushed = 1;
	}

	if (!priv->vlan_flushed) {
		ret = pp2_ppio_flush_vlan(priv->ppio);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to flush vlan list\n");
			/*
			 * TODO
			 * once pp2_ppio_flush_vlan() is supported jump to out
			 * goto out;
			 */
		}
		priv->vlan_flushed = 1;
	}

	/* For default QoS config, don't start classifier. */
	if (mrvl_qos_cfg) {
		ret = mrvl_start_qos_mapping(priv);
		if (ret) {
			pp2_ppio_deinit(priv->ppio);
			return ret;
		}
	}

	ret = mrvl_dev_set_link_up(dev);
	if (ret)
		goto out;

	return 0;
out:
	pp2_ppio_deinit(priv->ppio);
	return ret;
}

/**
 * Flush receive queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_rx_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing rx queues\n");
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		int ret, num;

		do {
			struct mrvl_rxq *q = dev->data->rx_queues[i];
			struct pp2_ppio_desc descs[MRVL_PP2_RXD_MAX];

			num = MRVL_PP2_RXD_MAX;
			ret = pp2_ppio_recv(q->priv->ppio,
					    q->priv->rxq_map[q->queue_id].tc,
					    q->priv->rxq_map[q->queue_id].inq,
					    descs, (uint16_t *)&num);
		} while (ret == 0 && num);
	}
}

/**
 * Flush transmit shadow queues.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_tx_shadow_queues(struct rte_eth_dev *dev)
{
	int i;

	RTE_LOG(INFO, PMD, "Flushing tx shadow queues\n");
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		struct mrvl_shadow_txq *sq =
			&shadow_txqs[dev->data->port_id][i];

		while (sq->tail != sq->head) {
			uint64_t addr = cookie_addr_high |
					sq->ent[sq->tail].buff.cookie;
			rte_pktmbuf_free((struct rte_mbuf *)addr);
			sq->tail = (sq->tail + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		}

		memset(sq, 0, sizeof(*sq));
	}
}

/**
 * Flush hardware bpool (buffer-pool).
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_flush_bpool(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	uint32_t num;
	int ret;

	ret = pp2_bpool_get_num_buffs(priv->bpool, &num);
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to get bpool buffers number\n");
		return;
	}

	while (num--) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		ret = pp2_bpool_get_buff(hifs[rte_lcore_id()], priv->bpool,
					 &inf);
		if (ret)
			break;

		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}
}

/**
 * DPDK callback to stop the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_stop(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	mrvl_dev_set_link_down(dev);
	mrvl_flush_rx_queues(dev);
	mrvl_flush_tx_shadow_queues(dev);
	if (priv->qos_tbl)
		pp2_cls_qos_tbl_deinit(priv->qos_tbl);
	pp2_ppio_deinit(priv->ppio);
	priv->ppio = NULL;
}

/**
 * DPDK callback to close the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_dev_close(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	size_t i;

	for (i = 0; i < priv->ppio_params.inqs_params.num_tcs; ++i) {
		struct pp2_ppio_tc_params *tc_params =
			&priv->ppio_params.inqs_params.tcs_params[i];

		if (tc_params->inqs_params) {
			rte_free(tc_params->inqs_params);
			tc_params->inqs_params = NULL;
		}
	}

	mrvl_flush_bpool(dev);
}

/**
 * DPDK callback to retrieve physical link information.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param wait_to_complete
 *   Wait for request completion (ignored).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
{
	/*
	 * TODO
	 * once MUSDK provides necessary API use it here
	 */
	struct ethtool_cmd edata;
	struct ifreq req;
	int ret, fd;

	edata.cmd = ETHTOOL_GSET;

	strcpy(req.ifr_name, dev->data->name);
	req.ifr_data = (void *)&edata;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd == -1)
		return -EFAULT;

	ret = ioctl(fd, SIOCETHTOOL, &req);
	if (ret == -1) {
		close(fd);
		return -EFAULT;
	}

	close(fd);

	switch (ethtool_cmd_speed(&edata)) {
	case SPEED_10:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10M;
		break;
	case SPEED_100:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_100M;
		break;
	case SPEED_1000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_1G;
		break;
	case SPEED_10000:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_10G;
		break;
	default:
		dev->data->dev_link.link_speed = ETH_SPEED_NUM_NONE;
	}

	dev->data->dev_link.link_duplex = edata.duplex ? ETH_LINK_FULL_DUPLEX :
							 ETH_LINK_HALF_DUPLEX;
	dev->data->dev_link.link_autoneg = edata.autoneg ? ETH_LINK_AUTONEG :
							   ETH_LINK_FIXED;

	return 0;
}

/**
 * DPDK callback to enable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable promiscuous mode\n");
}

/**
 * DPDK callback to enable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 1);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to enable all-multicast mode\n");
}

/**
 * DPDK callback to disable promiscuous mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_uc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable promiscuous mode\n");
}

/**
 * DPDK callback to disable allmulticast mode.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int ret;

	ret = pp2_ppio_set_mc_promisc(priv->ppio, 0);
	if (ret)
		RTE_LOG(ERR, PMD, "Failed to disable all-multicast mode\n");
}

/**
 * DPDK callback to remove a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param index
 *   MAC address index.
 */
static void
mrvl_mac_addr_remove(struct rte_eth_dev *dev, uint32_t index)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	int ret;

	ret = pp2_ppio_remove_mac_addr(priv->ppio,
				       dev->data->mac_addrs[index].addr_bytes);
	if (ret) {
		ether_format_addr(buf, sizeof(buf),
				  &dev->data->mac_addrs[index]);
		RTE_LOG(ERR, PMD, "Failed to remove mac %s\n", buf);
	}
}

/**
 * DPDK callback to add a MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 * @param index
 *   MAC address index.
 * @param vmdq
 *   VMDq pool index to associate address with (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_mac_addr_add(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
		  uint32_t index, uint32_t vmdq __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	char buf[ETHER_ADDR_FMT_SIZE];
	int ret;

	if (index == 0)
		/* For setting index 0, mrvl_mac_addr_set() should be used.*/
		return -1;

	/*
	 * Maximum number of uc addresses can be tuned via kernel module mvpp2x
	 * parameter uc_filter_max. Maximum number of mc addresses is then
	 * MRVL_MAC_ADDRS_MAX - uc_filter_max. Currently it defaults to 4 and
	 * 21 respectively.
	 *
	 * If more than uc_filter_max uc addresses are added to the filter
	 * list, the NIC switches to promiscuous mode automatically.
	 *
	 * If more than MRVL_MAC_ADDRS_MAX - uc_filter_max mc addresses are
	 * added to the filter list, the NIC switches to all-multicast mode
	 * automatically.
	 */
	ret = pp2_ppio_add_mac_addr(priv->ppio, mac_addr->addr_bytes);
	if (ret) {
		ether_format_addr(buf, sizeof(buf), mac_addr);
		RTE_LOG(ERR, PMD, "Failed to add mac %s\n", buf);
		return -1;
	}

	return 0;
}

/**
 * DPDK callback to set the primary MAC address.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param mac_addr
 *   MAC address to register.
 */
static void
mrvl_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	pp2_ppio_set_mac_addr(priv->ppio, mac_addr->addr_bytes);
	/*
	 * TODO
	 * Port stops sending packets if pp2_ppio_set_mac_addr()
	 * was called after pp2_ppio_enable(). As a quick fix,
	 * enable the port once again.
	 */
	pp2_ppio_enable(priv->ppio);
}

/**
 * DPDK callback to get device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param stats
 *   Stats structure output buffer.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct pp2_ppio_statistics ppio_stats;
	uint64_t drop_mac = 0;
	unsigned int i, idx, ret;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];
		struct pp2_ppio_inq_statistics rx_stats;

		if (!rxq)
			continue;

		idx = rxq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			RTE_LOG(ERR, PMD,
				"rx queue %d stats out of range (0 - %d)\n",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
			continue;
		}

		ret = pp2_ppio_inq_get_statistics(priv->ppio,
						  priv->rxq_map[idx].tc,
						  priv->rxq_map[idx].inq,
						  &rx_stats, 0);
		if (unlikely(ret)) {
			RTE_LOG(ERR, PMD,
				"Failed to update rx queue %d stats\n", idx);
			break;
		}

		stats->q_ibytes[idx] = rxq->bytes_recv;
		stats->q_ipackets[idx] = rx_stats.enq_desc - rxq->drop_mac;
		stats->q_errors[idx] = rx_stats.drop_early +
				       rx_stats.drop_fullq +
				       rx_stats.drop_bm +
				       rxq->drop_mac;
		stats->ibytes += rxq->bytes_recv;
		drop_mac += rxq->drop_mac;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];
		struct pp2_ppio_outq_statistics tx_stats;

		if (!txq)
			continue;

		idx = txq->queue_id;
		if (unlikely(idx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)) {
			RTE_LOG(ERR, PMD,
				"tx queue %d stats out of range (0 - %d)\n",
				idx, RTE_ETHDEV_QUEUE_STAT_CNTRS - 1);
			/* skip the queue to avoid indexing past the array */
			continue;
		}

		ret = pp2_ppio_outq_get_statistics(priv->ppio, idx,
						   &tx_stats, 0);
		if (unlikely(ret)) {
			RTE_LOG(ERR, PMD,
				"Failed to update tx queue %d stats\n", idx);
			break;
		}

		stats->q_opackets[idx] = tx_stats.deq_desc;
		stats->q_obytes[idx] = txq->bytes_sent;
		stats->obytes += txq->bytes_sent;
	}

	ret = pp2_ppio_get_statistics(priv->ppio, &ppio_stats, 0);
	if (unlikely(ret)) {
		RTE_LOG(ERR, PMD, "Failed to update port statistics\n");
		return ret;
	}

	stats->ipackets += ppio_stats.rx_packets - drop_mac;
	stats->opackets += ppio_stats.tx_packets;
	stats->imissed += ppio_stats.rx_fullq_dropped +
			  ppio_stats.rx_bm_dropped +
			  ppio_stats.rx_early_dropped +
			  ppio_stats.rx_fifo_dropped +
			  ppio_stats.rx_cls_dropped;
	stats->ierrors = drop_mac;

	return 0;
}

/**
 * DPDK callback to clear device statistics.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 */
static void
mrvl_stats_reset(struct rte_eth_dev *dev)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct mrvl_rxq *rxq = dev->data->rx_queues[i];

		pp2_ppio_inq_get_statistics(priv->ppio, priv->rxq_map[i].tc,
					    priv->rxq_map[i].inq, NULL, 1);
		rxq->bytes_recv = 0;
		rxq->drop_mac = 0;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct mrvl_txq *txq = dev->data->tx_queues[i];

		pp2_ppio_outq_get_statistics(priv->ppio, i, NULL, 1);
		txq->bytes_sent = 0;
	}

	pp2_ppio_get_statistics(priv->ppio, NULL, 1);
}

/**
 * DPDK callback to get information about the device.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 * @param info
 *   Info structure output buffer.
 */
static void
mrvl_dev_infos_get(struct rte_eth_dev *dev __rte_unused,
		   struct rte_eth_dev_info *info)
{
	info->speed_capa = ETH_LINK_SPEED_10M |
			   ETH_LINK_SPEED_100M |
			   ETH_LINK_SPEED_1G |
			   ETH_LINK_SPEED_10G;

	info->max_rx_queues = MRVL_PP2_RXQ_MAX;
	info->max_tx_queues = MRVL_PP2_TXQ_MAX;
	info->max_mac_addrs = MRVL_MAC_ADDRS_MAX;

	info->rx_desc_lim.nb_max = MRVL_PP2_RXD_MAX;
	info->rx_desc_lim.nb_min = MRVL_PP2_RXD_MIN;
	info->rx_desc_lim.nb_align = MRVL_PP2_RXD_ALIGN;

	info->tx_desc_lim.nb_max = MRVL_PP2_TXD_MAX;
	info->tx_desc_lim.nb_min = MRVL_PP2_TXD_MIN;
	info->tx_desc_lim.nb_align = MRVL_PP2_TXD_ALIGN;

	info->rx_offload_capa = DEV_RX_OFFLOAD_JUMBO_FRAME |
				DEV_RX_OFFLOAD_VLAN_FILTER |
				DEV_RX_OFFLOAD_IPV4_CKSUM |
				DEV_RX_OFFLOAD_UDP_CKSUM |
				DEV_RX_OFFLOAD_TCP_CKSUM;

	info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM |
				DEV_TX_OFFLOAD_UDP_CKSUM |
				DEV_TX_OFFLOAD_TCP_CKSUM;

	info->flow_type_rss_offloads = ETH_RSS_IPV4 |
				       ETH_RSS_NONFRAG_IPV4_TCP |
				       ETH_RSS_NONFRAG_IPV4_UDP;

	/* By default packets are dropped if no descriptors are available */
	info->default_rxconf.rx_drop_en = 1;

	info->max_rx_pktlen = MRVL_PKT_SIZE_MAX;
}

/**
 * Return supported packet types.
 *
 * @param dev
 *   Pointer to Ethernet device structure (unused).
 *
 * @return
 *   Const pointer to the table with supported packet types.
 */
static const uint32_t *
mrvl_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L3_IPV6_EXT,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP
	};

	return ptypes;
}

/**
 * DPDK callback to get information about specific receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rx_queue_id
 *   Receive queue index.
 * @param qinfo
 *   Receive queue information structure.
 */
static void mrvl_rxq_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			      struct rte_eth_rxq_info *qinfo)
{
	struct mrvl_rxq *q = dev->data->rx_queues[rx_queue_id];
	struct mrvl_priv *priv = dev->data->dev_private;
	int inq = priv->rxq_map[rx_queue_id].inq;
	int tc = priv->rxq_map[rx_queue_id].tc;
	struct pp2_ppio_tc_params *tc_params =
		&priv->ppio_params.inqs_params.tcs_params[tc];

	qinfo->mp = q->mp;
	qinfo->nb_desc = tc_params->inqs_params[inq].size;
}

/**
 * DPDK callback to get information about specific transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param tx_queue_id
 *   Transmit queue index.
 * @param qinfo
 *   Transmit queue information structure.
 */
static void mrvl_txq_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
			      struct rte_eth_txq_info *qinfo)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	qinfo->nb_desc =
		priv->ppio_params.outqs_params.outqs_params[tx_queue_id].size;
}

/**
 * DPDK callback to configure a VLAN filter.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param vlan_id
 *   VLAN ID to filter.
 * @param on
 *   Toggle filter.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	return on ? pp2_ppio_add_vlan(priv->ppio, vlan_id) :
		    pp2_ppio_remove_vlan(priv->ppio, vlan_id);
}

/**
 * Release buffers to hardware bpool (buffer-pool).
 *
 * @param rxq
 *   Receive queue pointer.
 * @param num
 *   Number of buffers to release to bpool.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_fill_bpool(struct mrvl_rxq *rxq, int num)
{
	struct buff_release_entry entries[MRVL_PP2_TXD_MAX];
	struct rte_mbuf *mbufs[MRVL_PP2_TXD_MAX];
	int i, ret;
	unsigned int core_id = rte_lcore_id();
	struct pp2_hif *hif = hifs[core_id];
	struct pp2_bpool *bpool = rxq->priv->bpool;

	ret = rte_pktmbuf_alloc_bulk(rxq->mp, mbufs, num);
	if (ret)
		return ret;

	if (cookie_addr_high == MRVL_COOKIE_ADDR_INVALID)
		cookie_addr_high =
			(uint64_t)mbufs[0] & MRVL_COOKIE_HIGH_ADDR_MASK;

	for (i = 0; i < num; i++) {
		if (((uint64_t)mbufs[i] & MRVL_COOKIE_HIGH_ADDR_MASK)
			!= cookie_addr_high) {
			RTE_LOG(ERR, PMD,
				"mbuf virtual addr high 0x%lx out of range\n",
				(uint64_t)mbufs[i] >> 32);
			goto out;
		}

		entries[i].buff.addr =
			rte_mbuf_data_dma_addr_default(mbufs[i]);
		entries[i].buff.cookie = (pp2_cookie_t)(uint64_t)mbufs[i];
		entries[i].bpool = bpool;
	}

	pp2_bpool_put_buffs(hif, entries, (uint16_t *)&i);
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] += i;

	if (i != num)
		goto out;

	return 0;
out:
	for (; i < num; i++)
		rte_pktmbuf_free(mbufs[i]);

	return -1;
}
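
/*
 * Note on the accounting in mrvl_fill_bpool(): pp2_bpool_put_buffs() takes
 * the buffer count through a pointer and updates it to the number of
 * buffers actually released, which is why the code checks "i != num"
 * afterwards and uses the possibly-updated i to bump mrvl_port_bpool_size
 * and to free the leftover mbufs on the error path.
 */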

/**
 * DPDK callback to configure the receive queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   RX queue index.
 * @param desc
 *   Number of descriptors to configure in queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 * @param mp
 *   Memory pool for buffer allocations.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_rxconf *conf __rte_unused,
		    struct rte_mempool *mp)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_rxq *rxq;
	uint32_t min_size,
		 max_rx_pkt_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
	int ret, tc, inq;

	if (priv->rxq_map[idx].tc == MRVL_UNKNOWN_TC) {
		/*
		 * Unknown TC mapping, mapping will not have a correct queue.
		 */
		RTE_LOG(ERR, PMD, "Unknown TC mapping for queue %hu eth%hhu\n",
			idx, priv->ppio_id);
		return -EFAULT;
	}

	min_size = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM -
		   MRVL_PKT_EFFEC_OFFS;
	if (min_size < max_rx_pkt_len) {
		RTE_LOG(ERR, PMD,
			"Mbuf size must be increased to %u bytes to hold up to %u bytes of data.\n",
			max_rx_pkt_len + RTE_PKTMBUF_HEADROOM +
			MRVL_PKT_EFFEC_OFFS,
			max_rx_pkt_len);
		return -EINVAL;
	}

	if (dev->data->rx_queues[idx]) {
		rte_free(dev->data->rx_queues[idx]);
		dev->data->rx_queues[idx] = NULL;
	}

	rxq = rte_zmalloc_socket("rxq", sizeof(*rxq), 0, socket);
	if (!rxq)
		return -ENOMEM;

	rxq->priv = priv;
	rxq->mp = mp;
	rxq->cksum_enabled = dev->data->dev_conf.rxmode.hw_ip_checksum;
	rxq->queue_id = idx;
	rxq->port_id = dev->data->port_id;
	mrvl_port_to_bpool_lookup[rxq->port_id] = priv->bpool;

	tc = priv->rxq_map[rxq->queue_id].tc;
	inq = priv->rxq_map[rxq->queue_id].inq;
	priv->ppio_params.inqs_params.tcs_params[tc].inqs_params[inq].size =
		desc;

	ret = mrvl_fill_bpool(rxq, desc);
	if (ret) {
		rte_free(rxq);
		return ret;
	}

	priv->bpool_init_size += desc;

	dev->data->rx_queues[idx] = rxq;

	return 0;
}

/**
 * DPDK callback to release the receive queue.
 *
 * @param rxq
 *   Generic receive queue pointer.
 */
static void
mrvl_rx_queue_release(void *rxq)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_tc_params *tc_params;
	int i, num, tc, inq;

	if (!q)
		return;

	tc = q->priv->rxq_map[q->queue_id].tc;
	inq = q->priv->rxq_map[q->queue_id].inq;
	tc_params = &q->priv->ppio_params.inqs_params.tcs_params[tc];
	num = tc_params->inqs_params[inq].size;
	for (i = 0; i < num; i++) {
		struct pp2_buff_inf inf;
		uint64_t addr;

		pp2_bpool_get_buff(hifs[rte_lcore_id()], q->priv->bpool, &inf);
		addr = cookie_addr_high | inf.cookie;
		rte_pktmbuf_free((struct rte_mbuf *)addr);
	}

	rte_free(q);
}

/**
 * DPDK callback to configure the transmit queue.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param idx
 *   Transmit queue index.
 * @param desc
 *   Number of descriptors to configure in the queue.
 * @param socket
 *   NUMA socket on which memory must be allocated.
 * @param conf
 *   Thresholds parameters (unused).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc,
		    unsigned int socket,
		    const struct rte_eth_txconf *conf __rte_unused)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	struct mrvl_txq *txq;

	if (dev->data->tx_queues[idx]) {
		rte_free(dev->data->tx_queues[idx]);
		dev->data->tx_queues[idx] = NULL;
	}

	txq = rte_zmalloc_socket("txq", sizeof(*txq), 0, socket);
	if (!txq)
		return -ENOMEM;

	txq->priv = priv;
	txq->queue_id = idx;
	txq->port_id = dev->data->port_id;
	dev->data->tx_queues[idx] = txq;

	priv->ppio_params.outqs_params.outqs_params[idx].size = desc;
	priv->ppio_params.outqs_params.outqs_params[idx].weight = 1;

	return 0;
}

/**
 * DPDK callback to release the transmit queue.
 *
 * @param txq
 *   Generic transmit queue pointer.
 */
static void
mrvl_tx_queue_release(void *txq)
{
	struct mrvl_txq *q = txq;

	if (!q)
		return;

	rte_free(q);
}

/**
 * Update RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_rss_hash_update(struct rte_eth_dev *dev,
		     struct rte_eth_rss_conf *rss_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;

	return mrvl_configure_rss(priv, rss_conf);
}

/**
 * DPDK callback to get RSS hash configuration.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param rss_conf
 *   Pointer to RSS configuration.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_rss_hash_conf_get(struct rte_eth_dev *dev,
		       struct rte_eth_rss_conf *rss_conf)
{
	struct mrvl_priv *priv = dev->data->dev_private;
	enum pp2_ppio_hash_type hash_type =
		priv->ppio_params.inqs_params.hash_type;

	rss_conf->rss_key = NULL;

	if (hash_type == PP2_PPIO_HASH_T_NONE)
		rss_conf->rss_hf = 0;
	else if (hash_type == PP2_PPIO_HASH_T_2_TUPLE)
		rss_conf->rss_hf = ETH_RSS_IPV4;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_TCP;
	else if (hash_type == PP2_PPIO_HASH_T_5_TUPLE && !priv->rss_hf_tcp)
		rss_conf->rss_hf = ETH_RSS_NONFRAG_IPV4_UDP;

	return 0;
}

static const struct eth_dev_ops mrvl_ops = {
	.dev_configure = mrvl_dev_configure,
	.dev_start = mrvl_dev_start,
	.dev_stop = mrvl_dev_stop,
	.dev_set_link_up = mrvl_dev_set_link_up,
	.dev_set_link_down = mrvl_dev_set_link_down,
	.dev_close = mrvl_dev_close,
	.link_update = mrvl_link_update,
	.promiscuous_enable = mrvl_promiscuous_enable,
	.allmulticast_enable = mrvl_allmulticast_enable,
	.promiscuous_disable = mrvl_promiscuous_disable,
	.allmulticast_disable = mrvl_allmulticast_disable,
	.mac_addr_remove = mrvl_mac_addr_remove,
	.mac_addr_add = mrvl_mac_addr_add,
	.mac_addr_set = mrvl_mac_addr_set,
	.mtu_set = mrvl_mtu_set,
	.stats_get = mrvl_stats_get,
	.stats_reset = mrvl_stats_reset,
	.dev_infos_get = mrvl_dev_infos_get,
	.dev_supported_ptypes_get = mrvl_dev_supported_ptypes_get,
	.rxq_info_get = mrvl_rxq_info_get,
	.txq_info_get = mrvl_txq_info_get,
	.vlan_filter_set = mrvl_vlan_filter_set,
	.rx_queue_setup = mrvl_rx_queue_setup,
	.rx_queue_release = mrvl_rx_queue_release,
	.tx_queue_setup = mrvl_tx_queue_setup,
	.tx_queue_release = mrvl_tx_queue_release,
	.rss_hash_update = mrvl_rss_hash_update,
	.rss_hash_conf_get = mrvl_rss_hash_conf_get,
};

/**
 * Return packet type information and l3/l4 offsets.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 * @param l3_offset
 *   l3 packet offset.
 * @param l4_offset
 *   l4 packet offset.
 *
 * @return
 *   Packet type information.
 */
static inline uint64_t
mrvl_desc_to_packet_type_and_offset(struct pp2_ppio_desc *desc,
				    uint8_t *l3_offset, uint8_t *l4_offset)
{
	enum pp2_inq_l3_type l3_type;
	enum pp2_inq_l4_type l4_type;
	uint64_t packet_type;

	pp2_ppio_inq_desc_get_l3_info(desc, &l3_type, l3_offset);
	pp2_ppio_inq_desc_get_l4_info(desc, &l4_type, l4_offset);

	packet_type = RTE_PTYPE_L2_ETHER;

	switch (l3_type) {
	case PP2_INQ_L3_TYPE_IPV4_NO_OPTS:
		packet_type |= RTE_PTYPE_L3_IPV4;
		break;
	case PP2_INQ_L3_TYPE_IPV4_OK:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT;
		break;
	case PP2_INQ_L3_TYPE_IPV4_TTL_ZERO:
		packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
		break;
	case PP2_INQ_L3_TYPE_IPV6_NO_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6;
		break;
	case PP2_INQ_L3_TYPE_IPV6_EXT:
		packet_type |= RTE_PTYPE_L3_IPV6_EXT;
		break;
	case PP2_INQ_L3_TYPE_ARP:
		packet_type |= RTE_PTYPE_L2_ETHER_ARP;
		/*
		 * In case of ARP l4_offset is set to wrong value.
		 * Set it to proper one so that later on mbuf->l3_len can be
		 * calculated subtracting l4_offset and l3_offset.
		 */
		*l4_offset = *l3_offset + MRVL_ARP_LENGTH;
		break;
	default:
		RTE_LOG(DEBUG, PMD, "Failed to recognise l3 packet type\n");
		break;
	}

	switch (l4_type) {
	case PP2_INQ_L4_TYPE_TCP:
		packet_type |= RTE_PTYPE_L4_TCP;
		break;
	case PP2_INQ_L4_TYPE_UDP:
		packet_type |= RTE_PTYPE_L4_UDP;
		break;
	default:
		RTE_LOG(DEBUG, PMD, "Failed to recognise l4 packet type\n");
		break;
	}

	return packet_type;
}

/**
 * Get offload information from the received packet descriptor.
 *
 * @param desc
 *   Pointer to the received packet descriptor.
 *
 * @return
 *   Mbuf offload flags.
 */
static inline uint64_t
mrvl_desc_to_ol_flags(struct pp2_ppio_desc *desc)
{
	uint64_t flags;
	enum pp2_inq_desc_status status;

	status = pp2_ppio_inq_desc_get_l3_pkt_error(desc);
	if (unlikely(status != PP2_DESC_ERR_OK))
		flags = PKT_RX_IP_CKSUM_BAD;
	else
		flags = PKT_RX_IP_CKSUM_GOOD;

	status = pp2_ppio_inq_desc_get_l4_pkt_error(desc);
	if (unlikely(status != PP2_DESC_ERR_OK))
		flags |= PKT_RX_L4_CKSUM_BAD;
	else
		flags |= PKT_RX_L4_CKSUM_GOOD;

	return flags;
}

/**
 * DPDK callback for receive.
 *
 * @param rxq
 *   Generic pointer to the receive queue.
 * @param rx_pkts
 *   Array to store received packets.
 * @param nb_pkts
 *   Maximum number of packets in array.
 *
 * @return
 *   Number of packets successfully received.
 */
static uint16_t
mrvl_rx_pkt_burst(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct mrvl_rxq *q = rxq;
	struct pp2_ppio_desc descs[nb_pkts];
	struct pp2_bpool *bpool;
	int i, ret, rx_done = 0;
	int num;
	unsigned int core_id = rte_lcore_id();

	if (unlikely(!q->priv->ppio))
		return 0;

	bpool = q->priv->bpool;

	ret = pp2_ppio_recv(q->priv->ppio, q->priv->rxq_map[q->queue_id].tc,
			    q->priv->rxq_map[q->queue_id].inq, descs, &nb_pkts);
	if (unlikely(ret < 0)) {
		RTE_LOG(ERR, PMD, "Failed to receive packets\n");
		return 0;
	}
	mrvl_port_bpool_size[bpool->pp2_id][bpool->id][core_id] -= nb_pkts;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf;
		uint8_t l3_offset, l4_offset;
		enum pp2_inq_desc_status status;
		uint64_t addr;

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct pp2_ppio_desc *pref_desc;
			u64 pref_addr;

			pref_desc = &descs[i + MRVL_MUSDK_PREFETCH_SHIFT];
			pref_addr = cookie_addr_high |
				    pp2_ppio_inq_desc_get_cookie(pref_desc);
			rte_mbuf_prefetch_part1((struct rte_mbuf *)(pref_addr));
			rte_mbuf_prefetch_part2((struct rte_mbuf *)(pref_addr));
		}

		addr = cookie_addr_high |
		       pp2_ppio_inq_desc_get_cookie(&descs[i]);
		mbuf = (struct rte_mbuf *)addr;
		rte_pktmbuf_reset(mbuf);

		/* drop packet in case of mac, overrun or resource error */
		status = pp2_ppio_inq_desc_get_l2_pkt_error(&descs[i]);
		if (unlikely(status != PP2_DESC_ERR_OK)) {
			struct pp2_buff_inf binf = {
				.addr = rte_mbuf_data_dma_addr_default(mbuf),
				.cookie = (pp2_cookie_t)(uint64_t)mbuf,
			};

			pp2_bpool_put_buff(hifs[core_id], bpool, &binf);
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id]++;
			q->drop_mac++;
			continue;
		}

		mbuf->data_off += MRVL_PKT_EFFEC_OFFS;
		mbuf->pkt_len = pp2_ppio_inq_desc_get_pkt_len(&descs[i]);
		mbuf->data_len = mbuf->pkt_len;
		mbuf->port = q->port_id;
		mbuf->packet_type =
			mrvl_desc_to_packet_type_and_offset(&descs[i],
							    &l3_offset,
							    &l4_offset);
		mbuf->l2_len = l3_offset;
		mbuf->l3_len = l4_offset - l3_offset;

		if (likely(q->cksum_enabled))
			mbuf->ol_flags = mrvl_desc_to_ol_flags(&descs[i]);

		rx_pkts[rx_done++] = mbuf;
		q->bytes_recv += mbuf->pkt_len;
	}

	if (rte_spinlock_trylock(&q->priv->lock) == 1) {
		num = mrvl_get_bpool_size(bpool->pp2_id, bpool->id);

		if (unlikely(num <= q->priv->bpool_min_size ||
			     (!rx_done && num < q->priv->bpool_init_size))) {
			ret = mrvl_fill_bpool(q, MRVL_BURST_SIZE);
			if (ret)
				RTE_LOG(ERR, PMD, "Failed to fill bpool\n");
		} else if (unlikely(num > q->priv->bpool_max_size)) {
			int pkt_to_remove = num - q->priv->bpool_init_size;
			struct rte_mbuf *mbuf;
			struct pp2_buff_inf buff;

			RTE_LOG(DEBUG, PMD,
				"\nport-%d:%d: bpool %d oversize - remove %d buffers (pool size: %d -> %d)\n",
				bpool->pp2_id, q->priv->ppio->port_id,
				bpool->id, pkt_to_remove, num,
				q->priv->bpool_init_size);

			for (i = 0; i < pkt_to_remove; i++) {
				pp2_bpool_get_buff(hifs[core_id], bpool, &buff);
				mbuf = (struct rte_mbuf *)
					(cookie_addr_high | buff.cookie);
				rte_pktmbuf_free(mbuf);
			}
			mrvl_port_bpool_size
				[bpool->pp2_id][bpool->id][core_id] -=
								pkt_to_remove;
		}
		rte_spinlock_unlock(&q->priv->lock);
	}

	return rx_done;
}
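
/*
 * Note on the refill block in mrvl_rx_pkt_burst(): rte_spinlock_trylock()
 * makes pool resizing best-effort - whichever lcore happens to get the
 * lock adjusts the bpool while the others simply skip the block, so the
 * pool is never resized concurrently and the hot rx path never blocks
 * waiting for the lock.
 */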

/**
 * Prepare offload information.
 *
 * @param ol_flags
 *   Offload flags.
 * @param packet_type
 *   Packet type bitfield.
 * @param l3_type
 *   Pointer to the pp2_outq_l3_type structure.
 * @param l4_type
 *   Pointer to the pp2_outq_l4_type structure.
 * @param gen_l3_cksum
 *   Will be set to 1 in case l3 checksum is computed.
 * @param gen_l4_cksum
 *   Will be set to 1 in case l4 checksum is computed.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static inline int
mrvl_prepare_proto_info(uint64_t ol_flags, uint32_t packet_type,
			enum pp2_outq_l3_type *l3_type,
			enum pp2_outq_l4_type *l4_type,
			int *gen_l3_cksum,
			int *gen_l4_cksum)
{
	/*
	 * Based on ol_flags prepare information
	 * for pp2_ppio_outq_desc_set_proto_info() which sets up descriptor
	 * fields.
	 */
	if (ol_flags & PKT_TX_IPV4) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV4;
		*gen_l3_cksum = ol_flags & PKT_TX_IP_CKSUM ? 1 : 0;
	} else if (ol_flags & PKT_TX_IPV6) {
		*l3_type = PP2_OUTQ_L3_TYPE_IPV6;
		/* no checksum for ipv6 header */
		*gen_l3_cksum = 0;
	} else {
		/* if it is something different, stop processing */
		return -1;
	}

	ol_flags &= PKT_TX_L4_MASK;
	if ((packet_type & RTE_PTYPE_L4_TCP) &&
	    ol_flags == PKT_TX_TCP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_TCP;
		*gen_l4_cksum = 1;
	} else if ((packet_type & RTE_PTYPE_L4_UDP) &&
		   ol_flags == PKT_TX_UDP_CKSUM) {
		*l4_type = PP2_OUTQ_L4_TYPE_UDP;
		*gen_l4_cksum = 1;
	} else {
		*l4_type = PP2_OUTQ_L4_TYPE_OTHER;
		/* no checksum for other type */
		*gen_l4_cksum = 0;
	}

	return 0;
}

/**
 * Release already sent buffers to bpool (buffer-pool).
 *
 * @param ppio
 *   Pointer to the port structure.
 * @param hif
 *   Pointer to the MUSDK hardware interface.
 * @param sq
 *   Pointer to the shadow queue.
 * @param qid
 *   Queue id number.
 * @param force
 *   Force releasing packets.
 */
static inline void
mrvl_free_sent_buffers(struct pp2_ppio *ppio, struct pp2_hif *hif,
		       struct mrvl_shadow_txq *sq, int qid, int force)
{
	struct buff_release_entry *entry;
	uint16_t nb_done = 0, num = 0, skip_bufs = 0;
	int i, core_id = rte_lcore_id();

	pp2_ppio_get_num_outq_done(ppio, hif, qid, &nb_done);

	sq->num_to_release += nb_done;

	if (likely(!force &&
		   sq->num_to_release < MRVL_PP2_BUF_RELEASE_BURST_SIZE))
		return;

	nb_done = sq->num_to_release;
	sq->num_to_release = 0;

	for (i = 0; i < nb_done; i++) {
		entry = &sq->ent[sq->tail + num];
		if (unlikely(!entry->buff.addr)) {
			RTE_LOG(ERR, PMD,
				"Shadow memory @%d: cookie(%lx), pa(%lx)!\n",
				sq->tail, (u64)entry->buff.cookie,
				(u64)entry->buff.addr);
			skip_bufs = 1;
			goto skip;
		}

		if (unlikely(!entry->bpool)) {
			struct rte_mbuf *mbuf;

			mbuf = (struct rte_mbuf *)
			       (cookie_addr_high | entry->buff.cookie);
			rte_pktmbuf_free(mbuf);
			skip_bufs = 1;
			goto skip;
		}

		mrvl_port_bpool_size
			[entry->bpool->pp2_id][entry->bpool->id][core_id]++;
		num++;
		if (unlikely(sq->tail + num == MRVL_PP2_TX_SHADOWQ_SIZE))
			goto skip;
		continue;
skip:
		if (likely(num))
			pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		num += skip_bufs;
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
		num = 0;
		skip_bufs = 0;
	}

	if (likely(num)) {
		pp2_bpool_put_buffs(hif, &sq->ent[sq->tail], &num);
		sq->tail = (sq->tail + num) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size -= num;
	}
}
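
/*
 * Note on mrvl_free_sent_buffers(): tx completions are accumulated in
 * num_to_release until MRVL_PP2_BUF_RELEASE_BURST_SIZE is reached (unless
 * force is set), then the buffers are returned to their bpools in
 * contiguous runs via pp2_bpool_put_buffs(). Entries without a bpool
 * (application mbufs or refcnt > 1, see mrvl_tx_pkt_burst()) are instead
 * freed one by one with rte_pktmbuf_free().
 */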

/**
 * DPDK callback for transmit.
 *
 * @param txq
 *   Generic pointer transmit queue.
 * @param tx_pkts
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted.
 */
static uint16_t
mrvl_tx_pkt_burst(void *txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct mrvl_txq *q = txq;
	struct mrvl_shadow_txq *sq = &shadow_txqs[q->port_id][rte_lcore_id()];
	struct pp2_hif *hif = hifs[rte_lcore_id()];
	struct pp2_ppio_desc descs[nb_pkts];
	int i, ret, bytes_sent = 0;
	uint16_t num, sq_free_size;
	uint64_t addr;

	if (unlikely(!q->priv->ppio))
		return 0;

	if (sq->size)
		mrvl_free_sent_buffers(q->priv->ppio, hif, sq, q->queue_id, 0);

	sq_free_size = MRVL_PP2_TX_SHADOWQ_SIZE - sq->size - 1;
	if (unlikely(nb_pkts > sq_free_size)) {
		RTE_LOG(DEBUG, PMD,
			"No room in shadow queue for %d packets! %d packets will be sent.\n",
			nb_pkts, sq_free_size);
		nb_pkts = sq_free_size;
	}

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *mbuf = tx_pkts[i];
		int gen_l3_cksum, gen_l4_cksum;
		enum pp2_outq_l3_type l3_type;
		enum pp2_outq_l4_type l4_type;

		if (likely(nb_pkts - i > MRVL_MUSDK_PREFETCH_SHIFT)) {
			struct rte_mbuf *pref_pkt_hdr;

			pref_pkt_hdr = tx_pkts[i + MRVL_MUSDK_PREFETCH_SHIFT];
			rte_mbuf_prefetch_part1(pref_pkt_hdr);
			rte_mbuf_prefetch_part2(pref_pkt_hdr);
		}

		sq->ent[sq->head].buff.cookie = (pp2_cookie_t)(uint64_t)mbuf;
		sq->ent[sq->head].buff.addr =
			rte_mbuf_data_dma_addr_default(mbuf);
		sq->ent[sq->head].bpool =
			(unlikely(mbuf->port == 0xff || mbuf->refcnt > 1)) ?
			 NULL : mrvl_port_to_bpool_lookup[mbuf->port];
		sq->head = (sq->head + 1) & MRVL_PP2_TX_SHADOWQ_MASK;
		sq->size++;

		pp2_ppio_outq_desc_reset(&descs[i]);
		pp2_ppio_outq_desc_set_phys_addr(&descs[i],
						 rte_pktmbuf_mtophys(mbuf));
		pp2_ppio_outq_desc_set_pkt_offset(&descs[i], 0);
		pp2_ppio_outq_desc_set_pkt_len(&descs[i],
					       rte_pktmbuf_pkt_len(mbuf));

		bytes_sent += rte_pktmbuf_pkt_len(mbuf);
		/*
		 * in case unsupported ol_flags were passed
		 * do not update descriptor offload information
		 */
		ret = mrvl_prepare_proto_info(mbuf->ol_flags, mbuf->packet_type,
					      &l3_type, &l4_type, &gen_l3_cksum,
					      &gen_l4_cksum);
		if (unlikely(ret))
			continue;

		pp2_ppio_outq_desc_set_proto_info(&descs[i], l3_type, l4_type,
						  mbuf->l2_len,
						  mbuf->l2_len + mbuf->l3_len,
						  gen_l3_cksum, gen_l4_cksum);
	}

	num = nb_pkts;
	pp2_ppio_send(q->priv->ppio, hif, q->queue_id, descs, &nb_pkts);
	/* number of packets that were not sent */
	if (unlikely(num > nb_pkts)) {
		for (i = nb_pkts; i < num; i++) {
			sq->head = (MRVL_PP2_TX_SHADOWQ_SIZE + sq->head - 1) &
				MRVL_PP2_TX_SHADOWQ_MASK;
			addr = cookie_addr_high | sq->ent[sq->head].buff.cookie;
			bytes_sent -=
				rte_pktmbuf_pkt_len((struct rte_mbuf *)addr);
		}
		sq->size -= num - nb_pkts;
	}

	q->bytes_sent += bytes_sent;

	return nb_pkts;
}
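
/*
 * Note on the rollback above: pp2_ppio_send() updates nb_pkts to the
 * number of descriptors actually accepted by hardware, so for the
 * num - nb_pkts rejected packets the shadow queue head is walked backwards
 * and their lengths are subtracted from the byte counter; per the usual
 * DPDK tx-burst contract those mbufs remain owned by the caller.
 */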

/**
 * Initialize packet processor.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_pp2(void)
{
	struct pp2_init_params init_params;

	memset(&init_params, 0, sizeof(init_params));
	init_params.hif_reserved_map = MRVL_MUSDK_HIFS_RESERVED;
	init_params.bm_pool_reserved_map = MRVL_MUSDK_BPOOLS_RESERVED;
	init_params.rss_tbl_reserved_map = MRVL_MUSDK_RSS_RESERVED;

	return pp2_init(&init_params);
}
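
/*
 * The reserved maps passed to pp2_init() tell MUSDK which hifs, bm pools
 * and RSS tables are owned by the kernel driver and must not be handed
 * out to this PMD (see the MRVL_MUSDK_*_RESERVED defines at the top of
 * this file).
 */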

/**
 * Deinitialize packet processor.
 */
static void
mrvl_deinit_pp2(void)
{
	pp2_deinit();
}

/**
 * Create private device structure.
 *
 * @param dev_name
 *   Pointer to the port name passed in the initialization parameters.
 *
 * @return
 *   Pointer to the newly allocated private device structure.
 */
static struct mrvl_priv *
mrvl_priv_create(const char *dev_name)
{
	struct pp2_bpool_params bpool_params;
	char match[MRVL_MATCH_LEN];
	struct mrvl_priv *priv;
	int ret, bpool_bit;

	priv = rte_zmalloc_socket(dev_name, sizeof(*priv), 0, rte_socket_id());
	if (!priv)
		return NULL;

	ret = pp2_netdev_get_ppio_info((char *)(uintptr_t)dev_name,
				       &priv->pp_id, &priv->ppio_id);
	if (ret)
		goto out_free_priv;

	bpool_bit = mrvl_reserve_bit(&used_bpools[priv->pp_id],
				     PP2_BPOOL_NUM_POOLS);
	if (bpool_bit < 0)
		goto out_free_priv;
	priv->bpool_bit = bpool_bit;

	snprintf(match, sizeof(match), "pool-%d:%d", priv->pp_id,
		 priv->bpool_bit);
	memset(&bpool_params, 0, sizeof(bpool_params));
	bpool_params.match = match;
	bpool_params.buff_len = MRVL_PKT_SIZE_MAX + MRVL_PKT_EFFEC_OFFS;
	ret = pp2_bpool_init(&bpool_params, &priv->bpool);
	if (ret)
		goto out_clear_bpool_bit;

	priv->ppio_params.type = PP2_PPIO_T_NIC;
	rte_spinlock_init(&priv->lock);

	return priv;
out_clear_bpool_bit:
	used_bpools[priv->pp_id] &= ~(1 << priv->bpool_bit);
out_free_priv:
	rte_free(priv);
	return NULL;
}

/**
 * Create device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port's name.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_eth_dev_create(struct rte_vdev_device *vdev, const char *name)
{
	int ret, fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;
	struct ifreq req;

	eth_dev = rte_eth_dev_allocate(name);
	if (!eth_dev)
		return -ENOMEM;

	priv = mrvl_priv_create(name);
	if (!priv) {
		ret = -ENOMEM;
		goto out_free_dev;
	}

	eth_dev->data->mac_addrs =
		rte_zmalloc("mac_addrs",
			    ETHER_ADDR_LEN * MRVL_MAC_ADDRS_MAX, 0);
	if (!eth_dev->data->mac_addrs) {
		RTE_LOG(ERR, PMD, "Failed to allocate space for eth addrs\n");
		ret = -ENOMEM;
		goto out_free_priv;
	}

	memset(&req, 0, sizeof(req));
	strcpy(req.ifr_name, name);
	ret = ioctl(fd, SIOCGIFHWADDR, &req);
	if (ret)
		goto out_free_mac;

	memcpy(eth_dev->data->mac_addrs[0].addr_bytes,
	       req.ifr_addr.sa_data, ETHER_ADDR_LEN);

	eth_dev->rx_pkt_burst = mrvl_rx_pkt_burst;
	eth_dev->tx_pkt_burst = mrvl_tx_pkt_burst;
	eth_dev->data->dev_private = priv;
	eth_dev->device = &vdev->device;
	eth_dev->dev_ops = &mrvl_ops;

	return 0;
out_free_mac:
	rte_free(eth_dev->data->mac_addrs);
out_free_dev:
	rte_eth_dev_release_port(eth_dev);
out_free_priv:
	rte_free(priv);

	return ret;
}

/**
 * Cleanup previously created device representing Ethernet port.
 *
 * @param name
 *   Pointer to the port name.
 */
static void
mrvl_eth_dev_destroy(const char *name)
{
	struct rte_eth_dev *eth_dev;
	struct mrvl_priv *priv;

	eth_dev = rte_eth_dev_allocated(name);
	if (!eth_dev)
		return;

	priv = eth_dev->data->dev_private;
	pp2_bpool_deinit(priv->bpool);
	rte_free(priv);
	rte_free(eth_dev->data->mac_addrs);
	rte_eth_dev_release_port(eth_dev);
}

/**
 * Callback used by rte_kvargs_process() during argument parsing.
 *
 * @param key
 *   Pointer to the parsed key (unused).
 * @param value
 *   Pointer to the parsed value.
 * @param extra_args
 *   Pointer to the extra arguments which contains address of the
 *   table of pointers to parsed interface names.
 *
 * @return
 *   Always 0.
 */
static int
mrvl_get_ifnames(const char *key __rte_unused, const char *value,
		 void *extra_args)
{
	const char **ifnames = extra_args;

	ifnames[mrvl_ports_nb++] = value;

	return 0;
}

/**
 * Initialize per-lcore MUSDK hardware interfaces (hifs).
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
mrvl_init_hifs(void)
{
	struct pp2_hif_params params;
	char match[MRVL_MATCH_LEN];
	int i, ret;

	RTE_LCORE_FOREACH(i) {
		ret = mrvl_reserve_bit(&used_hifs, MRVL_MUSDK_HIFS_MAX);
		if (ret < 0)
			return ret;

		snprintf(match, sizeof(match), "hif-%d", ret);
		memset(&params, 0, sizeof(params));
		params.match = match;
		params.out_size = MRVL_PP2_AGGR_TXQD_MAX;
		ret = pp2_hif_init(&params, &hifs[i]);
		if (ret) {
			RTE_LOG(ERR, PMD, "Failed to initialize hif %d\n", i);
			return ret;
		}
	}

	return 0;
}

/**
 * Deinitialize per-lcore MUSDK hardware interfaces (hifs).
 */
static void
mrvl_deinit_hifs(void)
{
	int i;

	RTE_LCORE_FOREACH(i) {
		if (hifs[i])
			pp2_hif_deinit(hifs[i]);
	}
}

static void mrvl_set_first_last_cores(int core_id)
{
	if (core_id < mrvl_lcore_first)
		mrvl_lcore_first = core_id;

	if (core_id > mrvl_lcore_last)
		mrvl_lcore_last = core_id;
}

/**
 * DPDK callback to register the virtual device.
 *
 * @param vdev
 *   Pointer to the virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_probe(struct rte_vdev_device *vdev)
{
	struct rte_kvargs *kvlist;
	const char *ifnames[PP2_NUM_ETH_PPIO * PP2_NUM_PKT_PROC];
	int ret = -EINVAL;
	uint32_t i, ifnum, cfgnum, core_id;
	const char *params;

	params = rte_vdev_device_args(vdev);
	if (!params)
		return -EINVAL;

	kvlist = rte_kvargs_parse(params, valid_args);
	if (!kvlist)
		return -EINVAL;

	ifnum = rte_kvargs_count(kvlist, MRVL_IFACE_NAME_ARG);
	if (ifnum > RTE_DIM(ifnames))
		goto out_free_kvlist;

	rte_kvargs_process(kvlist, MRVL_IFACE_NAME_ARG,
			   mrvl_get_ifnames, &ifnames);

	cfgnum = rte_kvargs_count(kvlist, MRVL_CFG_ARG);
	if (cfgnum > 1) {
		RTE_LOG(ERR, PMD, "Cannot handle more than one config file!\n");
		goto out_free_kvlist;
	} else if (cfgnum == 1) {
		rte_kvargs_process(kvlist, MRVL_CFG_ARG,
				   mrvl_get_qoscfg, &mrvl_qos_cfg);
	}

	/*
	 * ret == -EEXIST is correct, it means DMA
	 * has been already initialized (by another PMD).
	 */
	ret = mv_sys_dma_mem_init(RTE_MRVL_MUSDK_DMA_MEMSIZE);
	if (ret < 0 && ret != -EEXIST)
		goto out_free_kvlist;

	ret = mrvl_init_pp2();
	if (ret) {
		RTE_LOG(ERR, PMD, "Failed to init PP!\n");
		goto out_deinit_dma;
	}

	ret = mrvl_init_hifs();
	if (ret)
		goto out_deinit_hifs;

	for (i = 0; i < ifnum; i++) {
		RTE_LOG(INFO, PMD, "Creating %s\n", ifnames[i]);
		ret = mrvl_eth_dev_create(vdev, ifnames[i]);
		if (ret)
			goto out_cleanup;
	}

	rte_kvargs_free(kvlist);

	memset(mrvl_port_bpool_size, 0, sizeof(mrvl_port_bpool_size));

	mrvl_lcore_first = RTE_MAX_LCORE;
	mrvl_lcore_last = 0;

	RTE_LCORE_FOREACH(core_id) {
		mrvl_set_first_last_cores(core_id);
	}

	return 0;
out_cleanup:
	for (; i > 0; i--)
		mrvl_eth_dev_destroy(ifnames[i]);
out_deinit_hifs:
	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
out_deinit_dma:
	mv_sys_dma_mem_destroy();
out_free_kvlist:
	rte_kvargs_free(kvlist);

	return ret;
}

/**
 * DPDK callback to remove virtual device.
 *
 * @param vdev
 *   Pointer to the removed virtual device.
 *
 * @return
 *   0 on success, negative error value otherwise.
 */
static int
rte_pmd_mrvl_remove(struct rte_vdev_device *vdev)
{
	int i;
	const char *name;

	name = rte_vdev_device_name(vdev);
	if (!name)
		return -EINVAL;

	RTE_LOG(INFO, PMD, "Removing %s\n", name);

	for (i = 0; i < rte_eth_dev_count(); i++) {
		char ifname[RTE_ETH_NAME_MAX_LEN];

		rte_eth_dev_get_name_by_port(i, ifname);
		mrvl_eth_dev_destroy(ifname);
	}

	mrvl_deinit_hifs();
	mrvl_deinit_pp2();
	mv_sys_dma_mem_destroy();

	return 0;
}

static struct rte_vdev_driver pmd_mrvl_drv = {
	.probe = rte_pmd_mrvl_probe,
	.remove = rte_pmd_mrvl_remove,
};

RTE_PMD_REGISTER_VDEV(net_mrvl, pmd_mrvl_drv);
RTE_PMD_REGISTER_ALIAS(net_mrvl, eth_mrvl);
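
/*
 * Illustrative usage (the interface names and path below are examples,
 * not fixed values): the PMD is instantiated through vdev arguments
 * matching valid_args above, e.g.
 *
 *   testpmd --vdev=net_mrvl,iface=eth0,iface=eth2,cfg=/path/to/qos.conf ...
 *
 * with one "iface" entry per MUSDK-managed port and at most one optional
 * "cfg" QoS configuration file.
 */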