X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=73d17f7b3c61bac25b0f96fb907be9793413c1a6;hb=d61138d4f0e2927cc1d0fef6d810fa7f5d1161a1;hp=3fbc82977ac7327f13c297468af387ca7e0acd74;hpb=6a556bd6caa1e06942391e21e68b81c56e52308f;p=dpdk.git diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 3fbc82977a..73d17f7b3c 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -1,7 +1,7 @@ /* * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016 NXP + * Copyright 2016-2021 NXP * */ @@ -9,7 +9,7 @@ #include #include -#include +#include #include #include #include @@ -26,41 +26,53 @@ #include #include #include "dpaa2_ethdev.h" +#include "dpaa2_sparser.h" #include -#define DRIVER_LOOPBACK_MODE "drv_looback" +#define DRIVER_LOOPBACK_MODE "drv_loopback" +#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch" +#define DRIVER_TX_CONF "drv_tx_conf" +#define DRIVER_ERROR_QUEUE "drv_err_queue" +#define CHECK_INTERVAL 100 /* 100ms */ +#define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */ /* Supported Rx offloads */ static uint64_t dev_rx_offloads_sup = - DEV_RX_OFFLOAD_VLAN_STRIP | - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | - DEV_RX_OFFLOAD_VLAN_FILTER | - DEV_RX_OFFLOAD_JUMBO_FRAME; + RTE_ETH_RX_OFFLOAD_CHECKSUM | + RTE_ETH_RX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM | + RTE_ETH_RX_OFFLOAD_VLAN_STRIP | + RTE_ETH_RX_OFFLOAD_VLAN_FILTER | + RTE_ETH_RX_OFFLOAD_TIMESTAMP; /* Rx offloads which cannot be disabled */ static uint64_t dev_rx_offloads_nodis = - DEV_RX_OFFLOAD_SCATTER; + RTE_ETH_RX_OFFLOAD_RSS_HASH | + RTE_ETH_RX_OFFLOAD_SCATTER; /* Supported Tx offloads */ static uint64_t dev_tx_offloads_sup = - DEV_TX_OFFLOAD_VLAN_INSERT | - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + RTE_ETH_TX_OFFLOAD_VLAN_INSERT | + RTE_ETH_TX_OFFLOAD_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_UDP_CKSUM | + RTE_ETH_TX_OFFLOAD_TCP_CKSUM | + RTE_ETH_TX_OFFLOAD_SCTP_CKSUM | + RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM | + RTE_ETH_TX_OFFLOAD_MT_LOCKFREE | + RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; /* Tx offloads which cannot be disabled */ static uint64_t dev_tx_offloads_nodis = - DEV_TX_OFFLOAD_MULTI_SEGS | - DEV_TX_OFFLOAD_MT_LOCKFREE | - DEV_TX_OFFLOAD_MBUF_FAST_FREE; + RTE_ETH_TX_OFFLOAD_MULTI_SEGS; /* enable timestamp in mbuf */ -enum pmd_dpaa2_ts dpaa2_enable_ts; +bool dpaa2_enable_ts[RTE_MAX_ETHPORTS]; +uint64_t dpaa2_timestamp_rx_dynflag; +int dpaa2_timestamp_dynfield_offset = -1; + +/* Enable error queue */ +bool dpaa2_enable_err_queue; struct rte_dpaa2_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -82,38 +94,23 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = { {"ingress_nobuffer_discards", 2, 2}, {"egress_discarded_frames", 2, 3}, {"egress_confirmed_frames", 2, 4}, -}; - -static const enum rte_filter_op dpaa2_supported_filter_ops[] = { - RTE_ETH_FILTER_ADD, - RTE_ETH_FILTER_DELETE, - RTE_ETH_FILTER_UPDATE, - RTE_ETH_FILTER_FLUSH, - RTE_ETH_FILTER_GET + {"cgr_reject_frames", 4, 0}, + {"cgr_reject_bytes", 4, 1}, }; static struct rte_dpaa2_driver rte_dpaa2_pmd; -static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); static int dpaa2_dev_link_update(struct rte_eth_dev *dev, int 
wait_to_complete); static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -int dpaa2_logtype_pmd; - -__rte_experimental void -rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable) -{ - dpaa2_enable_ts = enable; -} - static int dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; PMD_INIT_FUNC_TRACE(); @@ -123,8 +120,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } if (on) - ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, - priv->token, vlan_id); + ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, + vlan_id, 0, 0, 0); else ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); @@ -140,20 +137,20 @@ static int dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; - int ret; + struct fsl_mc_io *dpni = dev->process_private; + int ret = 0; PMD_INIT_FUNC_TRACE(); - if (mask & ETH_VLAN_FILTER_MASK) { + if (mask & RTE_ETH_VLAN_FILTER_MASK) { /* VLAN Filter not avaialble */ if (!priv->max_vlan_filters) { DPAA2_PMD_INFO("VLAN filter not available"); - goto next_mask; + return -ENOTSUP; } if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_FILTER) + RTE_ETH_RX_OFFLOAD_VLAN_FILTER) ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, true); else @@ -162,14 +159,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (ret < 0) DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); } -next_mask: - if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND) - DPAA2_PMD_INFO("VLAN extend offload not supported"); - } - return 0; + return ret; } static int @@ -178,7 +169,7 @@ dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, uint16_t tpid) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; int ret = -ENOTSUP; PMD_INIT_FUNC_TRACE(); @@ -216,8 +207,7 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, size_t fw_size) { int ret; - struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; struct mc_soc_version mc_plat_info = {0}; struct mc_version mc_ver_info = {0}; @@ -235,23 +225,23 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, mc_ver_info.major, mc_ver_info.minor, mc_ver_info.revision); + if (ret < 0) + return -EINVAL; ret += 1; /* add the size of '\0' */ - if (fw_size < (uint32_t)ret) + if (fw_size < (size_t)ret) return ret; else return 0; } -static void +static int dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct dpaa2_dev_priv *priv = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); - dev_info->if_index = priv->hw_id; - dev_info->max_mac_addrs = priv->max_mac_filters; dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; @@ -261,14 +251,102 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_rx_offloads_nodis; dev_info->tx_offload_capa = dev_tx_offloads_sup | dev_tx_offloads_nodis; - dev_info->speed_capa = ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_2_5G | - ETH_LINK_SPEED_10G; + dev_info->speed_capa = 
RTE_ETH_LINK_SPEED_1G | + RTE_ETH_LINK_SPEED_2_5G | + RTE_ETH_LINK_SPEED_10G; dev_info->max_hash_mac_addrs = 0; dev_info->max_vfs = 0; - dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->max_vmdq_pools = RTE_ETH_16_POOLS; dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; + + dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size; + /* same is rx size for best perf */ + dev_info->default_txportconf.burst_size = dpaa2_dqrr_size; + + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD; + dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC; + + if (dpaa2_svr_family == SVR_LX2160A) { + dev_info->speed_capa |= RTE_ETH_LINK_SPEED_25G | + RTE_ETH_LINK_SPEED_40G | + RTE_ETH_LINK_SPEED_50G | + RTE_ETH_LINK_SPEED_100G; + } + + return 0; +} + +static int +dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + int ret = -EINVAL; + unsigned int i; + const struct burst_info { + uint64_t flags; + const char *output; + } rx_offload_map[] = { + {RTE_ETH_RX_OFFLOAD_CHECKSUM, " Checksum,"}, + {RTE_ETH_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, + {RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, + {RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"}, + {RTE_ETH_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"}, + {RTE_ETH_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"}, + {RTE_ETH_RX_OFFLOAD_TIMESTAMP, " Timestamp,"}, + {RTE_ETH_RX_OFFLOAD_RSS_HASH, " RSS,"}, + {RTE_ETH_RX_OFFLOAD_SCATTER, " Scattered,"} + }; + + /* Update Rx offload info */ + for (i = 0; i < RTE_DIM(rx_offload_map); i++) { + if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) { + snprintf(mode->info, sizeof(mode->info), "%s", + rx_offload_map[i].output); + ret = 0; + break; + } + } + return ret; +} + +static int +dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + int ret = -EINVAL; + unsigned int i; + const struct burst_info { + uint64_t flags; + const char *output; + } tx_offload_map[] = { + {RTE_ETH_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"}, + {RTE_ETH_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, + {RTE_ETH_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, + {RTE_ETH_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, + {RTE_ETH_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, + {RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, + {RTE_ETH_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"}, + {RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"}, + {RTE_ETH_TX_OFFLOAD_MULTI_SEGS, " Scattered,"} + }; + + /* Update Tx offload info */ + for (i = 0; i < RTE_DIM(tx_offload_map); i++) { + if (eth_conf->txmode.offloads & tx_offload_map[i].flags) { + snprintf(mode->info, sizeof(mode->info), "%s", + tx_offload_map[i].output); + ret = 0; + break; + } + } + return ret; } static int @@ -286,7 +364,10 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc); - tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; + if (priv->flags & DPAA2_TX_CONF_ENABLE) + tot_queues = priv->nb_rx_queues + 2 * priv->nb_tx_queues; + else + tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, RTE_CACHE_LINE_SIZE); if (!mc_q) { @@ -310,6 +391,25 @@ dpaa2_alloc_rx_tx_queues(struct 
rte_eth_dev *dev) goto fail; } + if (dpaa2_enable_err_queue) { + priv->rx_err_vq = rte_zmalloc("dpni_rx_err", + sizeof(struct dpaa2_queue), 0); + + dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; + dpaa2_q->q_storage = rte_malloc("err_dq_storage", + sizeof(struct queue_storage_info_t) * + RTE_MAX_LCORE, + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + for (i = 0; i < RTE_MAX_LCORE; i++) + if (dpaa2_alloc_dq_storage(&dpaa2_q->q_storage[i])) + goto fail; + } + for (i = 0; i < priv->nb_tx_queues; i++) { mc_q->eth_data = dev->data; mc_q->flow_id = 0xffff; @@ -321,6 +421,28 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) goto fail_tx; } + if (priv->flags & DPAA2_TX_CONF_ENABLE) { + /*Setup tx confirmation queues*/ + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->eth_data = dev->data; + mc_q->tc_index = i; + mc_q->flow_id = 0; + priv->tx_conf_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + dpaa2_q->q_storage = + rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail_tx_conf; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail_tx_conf; + } + } + vq_id = 0; for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; @@ -330,6 +452,14 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) } return 0; +fail_tx_conf: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + priv->tx_conf_vq[i--] = NULL; + } + i = priv->nb_tx_queues; fail_tx: i -= 1; while (i >= 0) { @@ -347,6 +477,14 @@ fail: rte_free(dpaa2_q->q_storage); priv->rx_vq[i--] = NULL; } + + if (dpaa2_enable_err_queue) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; + if (dpaa2_q->q_storage) + dpaa2_free_dq_storage(dpaa2_q->q_storage); + rte_free(dpaa2_q->q_storage); + } + rte_free(mc_q); return -1; } @@ -373,6 +511,14 @@ dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; rte_free(dpaa2_q->cscn); } + if (priv->flags & DPAA2_TX_CONF_ENABLE) { + /* cleanup tx conf queue storage */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *) + priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + } + } /*free memory for all queues (RX+TX) */ rte_free(priv->rx_vq[0]); priv->rx_vq[0] = NULL; @@ -383,7 +529,7 @@ static int dpaa2_eth_dev_configure(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; struct rte_eth_conf *eth_conf = &dev->data->dev_conf; uint64_t rx_offloads = eth_conf->rxmode.offloads; uint64_t tx_offloads = eth_conf->txmode.offloads; @@ -391,55 +537,62 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) int rx_l4_csum_offload = false; int tx_l3_csum_offload = false; int tx_l4_csum_offload = false; - int ret; + int ret, tc_index; + uint32_t max_rx_pktlen; PMD_INIT_FUNC_TRACE(); - /* Rx offloads validation */ + /* Rx offloads which are enabled by default */ if (dev_rx_offloads_nodis & ~rx_offloads) { - DPAA2_PMD_WARN( - "Rx offloads non configurable - requested 0x%" PRIx64 - " ignored 0x%" PRIx64, - rx_offloads, dev_rx_offloads_nodis); + DPAA2_PMD_INFO( + "Some of rx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + rx_offloads, 
dev_rx_offloads_nodis); } - /* Tx offloads validation */ + /* Tx offloads which are enabled by default */ if (dev_tx_offloads_nodis & ~tx_offloads) { - DPAA2_PMD_WARN( - "Tx offloads non configurable - requested 0x%" PRIx64 - " ignored 0x%" PRIx64, - tx_offloads, dev_tx_offloads_nodis); + DPAA2_PMD_INFO( + "Some of tx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + max_rx_pktlen = eth_conf->rxmode.mtu + RTE_ETHER_HDR_LEN + + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; + if (max_rx_pktlen <= DPAA2_MAX_RX_PKT_LEN) { + ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, + priv->token, max_rx_pktlen - RTE_ETHER_CRC_LEN); + if (ret != 0) { + DPAA2_PMD_ERR("Unable to set mtu. check config"); + return ret; + } + DPAA2_PMD_INFO("MTU configured for the device: %d", + dev->data->mtu); + } else { + return -1; } - if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { - if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { - ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, - priv->token, eth_conf->rxmode.max_rx_pkt_len); + if (eth_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) { + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_setup_flow_dist(dev, + eth_conf->rx_adv_conf.rss_conf.rss_hf, + tc_index); if (ret) { DPAA2_PMD_ERR( - "Unable to set mtu. check config"); + "Unable to set flow distribution on tc%d." + "Check queue config", tc_index); return ret; } - } else { - return -1; } } - if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { - ret = dpaa2_setup_flow_dist(dev, - eth_conf->rx_adv_conf.rss_conf.rss_hf); - if (ret) { - DPAA2_PMD_ERR("Unable to set flow distribution." - "Check queue config"); - return ret; - } - } - - if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + if (rx_offloads & RTE_ETH_RX_OFFLOAD_IPV4_CKSUM) rx_l3_csum_offload = true; - if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || - (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM)) + if ((rx_offloads & RTE_ETH_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & RTE_ETH_RX_OFFLOAD_TCP_CKSUM) || + (rx_offloads & RTE_ETH_RX_OFFLOAD_SCTP_CKSUM)) rx_l4_csum_offload = true; ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, @@ -456,12 +609,26 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) return ret; } - if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) +#if !defined(RTE_LIBRTE_IEEE1588) + if (rx_offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP) +#endif + { + ret = rte_mbuf_dyn_rx_timestamp_register( + &dpaa2_timestamp_dynfield_offset, + &dpaa2_timestamp_rx_dynflag); + if (ret != 0) { + DPAA2_PMD_ERR("Error to register timestamp field/flag"); + return -rte_errno; + } + dpaa2_enable_ts[dev->data->port_id] = true; + } + + if (tx_offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) tx_l3_csum_offload = true; - if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || - (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || - (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + if ((tx_offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & RTE_ETH_TX_OFFLOAD_SCTP_CKSUM)) tx_l4_csum_offload = true; ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, @@ -493,11 +660,10 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) } } - if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) - dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); + if (rx_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER) + dpaa2_vlan_offload_set(dev, RTE_ETH_VLAN_FILTER_MASK); - /* update the current status */ - dpaa2_dev_link_update(dev, 0); + dpaa2_tm_init(dev); return 0; } @@ -508,25 +674,32 @@ 
dpaa2_eth_dev_configure(struct rte_eth_dev *dev) static int dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, - uint16_t nb_rx_desc __rte_unused, + uint16_t nb_rx_desc, unsigned int socket_id __rte_unused, - const struct rte_eth_rxconf *rx_conf __rte_unused, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_q; struct dpni_queue cfg; uint8_t options = 0; uint8_t flow_id; uint32_t bpid; - int ret; + int i, ret; PMD_INIT_FUNC_TRACE(); DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", dev, rx_queue_id, mb_pool, rx_conf); + /* Rx deferred start is not supported */ + if (rx_conf->rx_deferred_start) { + DPAA2_PMD_ERR("%p:Rx deferred start not supported", + (void *)dev); + return -EINVAL; + } + if (!priv->bp_list || priv->bp_list->mp != mb_pool) { bpid = mempool_to_bpid(mb_pool); ret = dpaa2_attach_bp_list(priv, @@ -537,14 +710,32 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ dpaa2_q->bp_array = rte_dpaa2_bpid_info; + dpaa2_q->nb_desc = UINT16_MAX; + dpaa2_q->offloads = rx_conf->offloads; /*Get the flow id from given VQ id*/ - flow_id = rx_queue_id % priv->nb_rx_queues; + flow_id = dpaa2_q->flow_id; memset(&cfg, 0, sizeof(struct dpni_queue)); options = options | DPNI_QUEUE_OPT_USER_CTX; cfg.user_context = (size_t)(dpaa2_q); + /* check if a private cgr available. */ + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + priv->cgid_in_use[i] = 1; + break; + } + } + + if (i < priv->max_cgs) { + options |= DPNI_QUEUE_OPT_SET_CGID; + cfg.cgid = i; + dpaa2_q->cgid = cfg.cgid; + } else { + dpaa2_q->cgid = 0xff; + } + /*if ls2088 or rev2 device, enable the stashing */ if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { @@ -573,15 +764,56 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, struct dpni_taildrop taildrop; taildrop.enable = 1; - /*enabling per rx queue congestion control */ - taildrop.threshold = CONG_THRESHOLD_RX_Q; - taildrop.units = DPNI_CONGESTION_UNIT_BYTES; - taildrop.oal = CONG_RX_OAL; - DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", - rx_queue_id); - ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + dpaa2_q->nb_desc = nb_rx_desc; + /* Private CGR will use tail drop length as nb_rx_desc. + * for rest cases we can use standard byte based tail drop. + * There is no HW restriction, but number of CGRs are limited, + * hence this restriction is placed. 
+ */ + if (dpaa2_q->cgid != 0xff) { + /*enabling per rx queue congestion control */ + taildrop.threshold = nb_rx_desc; + taildrop.units = DPNI_CONGESTION_UNIT_FRAMES; + taildrop.oal = 0; + DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, + dpaa2_q->cgid, &taildrop); + } else { + /*enabling per rx queue congestion control */ + taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q; + taildrop.units = DPNI_CONGESTION_UNIT_BYTES; + taildrop.oal = CONG_RX_OAL; + DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, + &taildrop); + } + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", + ret); + return -1; + } + } else { /* Disable tail Drop */ + struct dpni_taildrop taildrop = {0}; + DPAA2_PMD_INFO("Tail drop is disabled on queue"); + + taildrop.enable = 0; + if (dpaa2_q->cgid != 0xff) { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, + dpaa2_q->tc_index, + dpaa2_q->cgid, &taildrop); + } else { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_QUEUE, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, &taildrop); + } if (ret) { DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", ret); @@ -596,22 +828,35 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, static int dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, - uint16_t nb_tx_desc __rte_unused, + uint16_t nb_tx_desc, unsigned int socket_id __rte_unused, - const struct rte_eth_txconf *tx_conf __rte_unused) + const struct rte_eth_txconf *tx_conf) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) priv->tx_vq[tx_queue_id]; - struct fsl_mc_io *dpni = priv->hw; + struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) + priv->tx_conf_vq[tx_queue_id]; + struct fsl_mc_io *dpni = dev->process_private; struct dpni_queue tx_conf_cfg; struct dpni_queue tx_flow_cfg; uint8_t options = 0, flow_id; + struct dpni_queue_id qid; uint32_t tc_id; int ret; PMD_INIT_FUNC_TRACE(); + /* Tx deferred start is not supported */ + if (tx_conf->tx_deferred_start) { + DPAA2_PMD_ERR("%p:Tx deferred start not supported", + (void *)dev); + return -EINVAL; + } + + dpaa2_q->nb_desc = UINT16_MAX; + dpaa2_q->offloads = tx_conf->offloads; + /* Return if queue already configured */ if (dpaa2_q->flow_id != 0xffff) { dev->data->tx_queues[tx_queue_id] = dpaa2_q; @@ -625,11 +870,11 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, flow_id = 0; ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, - tc_id, flow_id, options, &tx_flow_cfg); + tc_id, flow_id, options, &tx_flow_cfg); if (ret) { DPAA2_PMD_ERR("Error in setting the tx flow: " - "tc_id=%d, flow=%d err=%d", - tc_id, flow_id, ret); + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); return -1; } @@ -637,9 +882,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, if (tx_queue_id == 0) { /*Set tx-conf and error configuration*/ - ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, - priv->token, - DPNI_CONF_DISABLE); + if (priv->flags & DPAA2_TX_CONF_ENABLE) + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_AFFINE); + else + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_DISABLE); if (ret) { 
DPAA2_PMD_ERR("Error in set tx conf mode settings: " "err=%d", ret); @@ -648,15 +898,26 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, } dpaa2_q->tc_index = tc_id; + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &tx_flow_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_q->fqid = qid.fqid; + if (!(priv->flags & DPAA2_TX_CGR_OFF)) { - struct dpni_congestion_notification_cfg cong_notif_cfg; + struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; + + dpaa2_q->nb_desc = nb_tx_desc; cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; - cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; + cong_notif_cfg.threshold_entry = nb_tx_desc; /* Notify that the queue is not congested when the data in - * the queue is below this thershold. + * the queue is below this thershold.(90% of value) */ - cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; + cong_notif_cfg.threshold_exit = (nb_tx_desc * 9) / 10; cong_notif_cfg.message_ctx = 0; cong_notif_cfg.message_iova = (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); @@ -665,6 +926,7 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE; + cong_notif_cfg.cg_point = DPNI_CP_QUEUE; ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, priv->token, @@ -680,48 +942,89 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, } dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; dev->data->tx_queues[tx_queue_id] = dpaa2_q; + + if (priv->flags & DPAA2_TX_CONF_ENABLE) { + dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q; + options = options | DPNI_QUEUE_OPT_USER_CTX; + tx_conf_cfg.user_context = (size_t)(dpaa2_q); + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx conf flow: " + "tc_index=%d, flow=%d err=%d", + dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, ret); + return -1; + } + + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_tx_conf_q->fqid = qid.fqid; + } return 0; } static void -dpaa2_dev_rx_queue_release(void *q __rte_unused) +dpaa2_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id) { - PMD_INIT_FUNC_TRACE(); -} + struct dpaa2_queue *dpaa2_q = dev->data->rx_queues[rx_queue_id]; + struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)priv->eth_dev->process_private; + uint8_t options = 0; + int ret; + struct dpni_queue cfg; -static void -dpaa2_dev_tx_queue_release(void *q __rte_unused) -{ + memset(&cfg, 0, sizeof(struct dpni_queue)); PMD_INIT_FUNC_TRACE(); + if (dpaa2_q->cgid != 0xff) { + options = DPNI_QUEUE_OPT_CLEAR_CGID; + cfg.cgid = dpaa2_q->cgid; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, dpaa2_q->flow_id, + options, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d", + dpaa2_q->fqid, ret); + priv->cgid_in_use[dpaa2_q->cgid] = 0; + dpaa2_q->cgid = 0xff; + } } static uint32_t -dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) +dpaa2_dev_rx_queue_count(void *rx_queue) { int32_t ret; - struct dpaa2_dev_priv 
*priv = dev->data->dev_private; struct dpaa2_queue *dpaa2_q; struct qbman_swp *swp; struct qbman_fq_query_np_rslt state; uint32_t frame_cnt = 0; - PMD_INIT_FUNC_TRACE(); - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - DPAA2_PMD_ERR("Failure in affining portal"); + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); return -EINVAL; } } swp = DPAA2_PER_LCORE_PORTAL; - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; + dpaa2_q = rx_queue; if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { frame_cnt = qbman_fq_state_frame_count(&state); - DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", - rx_queue_id, frame_cnt); + DPAA2_PMD_DP_DEBUG("RX frame count for q(%p) is %u", + rx_queue, frame_cnt); } return frame_cnt; } @@ -744,6 +1047,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) }; if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx || + dev->rx_pkt_burst == dpaa2_dev_rx || dev->rx_pkt_burst == dpaa2_dev_loopback_rx) return ptypes; return NULL; @@ -763,7 +1067,7 @@ dpaa2_interrupt_handler(void *param) { struct rte_eth_dev *dev = param; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret; int irq_index = DPNI_IRQ_INDEX; unsigned int status = 0, clear = 0; @@ -787,8 +1091,7 @@ dpaa2_interrupt_handler(void *param) clear = DPNI_IRQ_EVENT_LINK_CHANGED; dpaa2_dev_link_update(dev, 0); /* calling all the apps registered for link status event */ - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, - NULL); + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } out: ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, @@ -802,7 +1105,7 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) { int err = 0; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int irq_index = DPNI_IRQ_INDEX; unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; @@ -832,7 +1135,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) struct rte_dpaa2_device *dpaa2_dev; struct rte_eth_dev_data *data = dev->data; struct dpaa2_dev_priv *priv = data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpni_queue cfg; struct dpni_error_cfg err_cfg; uint16_t qdid; @@ -842,7 +1145,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) struct rte_intr_handle *intr_handle; dpaa2_dev = container_of(rdev, struct rte_dpaa2_device, device); - intr_handle = &dpaa2_dev->intr_handle; + intr_handle = dpaa2_dev->intr_handle; PMD_INIT_FUNC_TRACE(); @@ -877,11 +1180,31 @@ dpaa2_dev_start(struct rte_eth_dev *dev) dpaa2_q->fqid = qid.fqid; } - /*checksum errors, send them to normal path and set it in annotation */ - err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; - err_cfg.errors |= DPNI_ERROR_PHE; + if (dpaa2_enable_err_queue) { + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX_ERR, 0, 0, &cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error getting rx err flow information: err=%d", + ret); + return ret; + } + dpaa2_q = (struct dpaa2_queue *)priv->rx_err_vq; + dpaa2_q->fqid = qid.fqid; + dpaa2_q->eth_data = dev->data; + + err_cfg.errors = DPNI_ERROR_DISC; + err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; + } else { + /* checksum errors, send them to normal path + * and 
set it in annotation + */ + err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; - err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; + /* if packet with parse error are not to be dropped */ + err_cfg.errors |= DPNI_ERROR_PHE; + + err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; + } err_cfg.set_frame_annotation = true; ret = dpni_set_errors_behavior(dpni, CMD_PRI_LOW, @@ -893,8 +1216,8 @@ dpaa2_dev_start(struct rte_eth_dev *dev) } /* if the interrupts were configured on this devices*/ - if (intr_handle && (intr_handle->fd) && - (dev->data->dev_conf.intr_conf.lsc != 0)) { + if (intr_handle && rte_intr_fd_get(intr_handle) && + dev->data->dev_conf.intr_conf.lsc != 0) { /* Registering LSC interrupt handler */ rte_intr_callback_register(intr_handle, dpaa2_interrupt_handler, @@ -921,11 +1244,11 @@ dpaa2_dev_start(struct rte_eth_dev *dev) * This routine disables all traffic on the adapter by issuing a * global reset on the MAC. */ -static void +static int dpaa2_dev_stop(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret; struct rte_eth_link link; struct rte_intr_handle *intr_handle = dev->intr_handle; @@ -933,8 +1256,8 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* reset interrupt callback */ - if (intr_handle && (intr_handle->fd) && - (dev->data->dev_conf.intr_conf.lsc != 0)) { + if (intr_handle && rte_intr_fd_get(intr_handle) && + dev->data->dev_conf.intr_conf.lsc != 0) { /*disable dpni irqs */ dpaa2_eth_setup_irqs(dev, 0); @@ -953,50 +1276,83 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) if (ret) { DPAA2_PMD_ERR("Failure (ret %d) in disabling dpni %d dev", ret, priv->hw_id); - return; + return ret; } /* clear the recorded link status */ memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); + + return 0; } -static void +static int dpaa2_dev_close(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - int ret; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int i, ret; struct rte_eth_link link; PMD_INIT_FUNC_TRACE(); - dpaa2_flow_clean(dev); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (!dpni) { + DPAA2_PMD_WARN("Already closed or not started"); + return -1; + } + dpaa2_tm_deinit(dev); + dpaa2_flow_clean(dev); /* Clean the device first */ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); if (ret) { DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); - return; + return -1; } memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); + + /* Free private queues memory */ + dpaa2_free_rx_tx_queues(dev); + /* Close the device at underlying layer*/ + ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure closing dpni device with err code %d", + ret); + } + + /* Free the allocated memory for ethernet private data and dpni*/ + priv->hw = NULL; + dev->process_private = NULL; + rte_free(dpni); + + for (i = 0; i < MAX_TCS; i++) + rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); + + if (priv->extract.qos_extract_param) + rte_free((void *)(size_t)priv->extract.qos_extract_param); + + DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); + return 0; } -static void +static int dpaa2_dev_promiscuous_enable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = 
(struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); @@ -1006,21 +1362,23 @@ dpaa2_dev_promiscuous_enable( ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); + + return ret; } -static void +static int dpaa2_dev_promiscuous_disable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); @@ -1034,49 +1392,55 @@ dpaa2_dev_promiscuous_disable( DPAA2_PMD_ERR("Unable to disable M promisc mode %d", ret); } + + return ret; } -static void +static int dpaa2_dev_allmulticast_enable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); + + return ret; } -static void +static int dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } /* must remain on for all promiscuous */ if (dev->data->promiscuous == 1) - return; + return 0; ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); + + return ret; } static int @@ -1084,8 +1448,8 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; PMD_INIT_FUNC_TRACE(); @@ -1095,24 +1459,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; } - /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) - return -EINVAL; - - if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.offloads &= - DEV_RX_OFFLOAD_JUMBO_FRAME; - else - dev->data->dev_conf.rxmode.offloads &= - ~DEV_RX_OFFLOAD_JUMBO_FRAME; - - dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - /* Set the Max Rx frame length as 'mtu' + * Maximum Ethernet header length */ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, - frame_size); + frame_size - RTE_ETHER_CRC_LEN); if (ret) { DPAA2_PMD_ERR("Setting the max frame length failed"); return -1; @@ -1123,13 +1474,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int 
dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr, + struct rte_ether_addr *addr, __rte_unused uint32_t index, __rte_unused uint32_t pool) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); @@ -1138,8 +1489,8 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, return -1; } - ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, - priv->token, addr->addr_bytes); + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, + addr->addr_bytes, 0, 0, 0); if (ret) DPAA2_PMD_ERR( "error: Adding the MAC ADDR failed: err = %d", ret); @@ -1152,9 +1503,9 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct rte_eth_dev_data *data = dev->data; - struct ether_addr *macaddr; + struct rte_ether_addr *macaddr; PMD_INIT_FUNC_TRACE(); @@ -1174,11 +1525,11 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, static int dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr) + struct rte_ether_addr *addr) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); @@ -1202,7 +1553,7 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int32_t retcode; uint8_t page0 = 0, page1 = 1, page2 = 2; union dpni_statistics value; @@ -1282,9 +1633,9 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int32_t retcode; - union dpni_statistics value[3] = {}; + union dpni_statistics value[5] = {}; unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); if (n < num) @@ -1311,6 +1662,18 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, if (retcode) goto err; + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, + priv->token, + 4, 0, &value[4]); + if (retcode) + goto err; + break; + } + } + for (i = 0; i < num; i++) { xstats[i].id = i; xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. 
@@ -1334,10 +1697,9 @@ dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) - snprintf(xstats_names[i].name, - sizeof(xstats_names[i].name), - "%s", - dpaa2_xstats_strings[i].name); + strlcpy(xstats_names[i].name, + dpaa2_xstats_strings[i].name, + sizeof(xstats_names[i].name)); return stat_cnt; } @@ -1351,9 +1713,10 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, if (!ids) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)dev->process_private; int32_t retcode; - union dpni_statistics value[3] = {}; + union dpni_statistics value[5] = {}; if (n < stat_cnt) return stat_cnt; @@ -1379,6 +1742,12 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, if (retcode) return 0; + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 4, 0, &value[4]); + if (retcode) + return 0; + for (i = 0; i < stat_cnt; i++) { values[i] = value[dpaa2_xstats_strings[i].page_id]. raw.counter[dpaa2_xstats_strings[i].stats_id]; @@ -1401,8 +1770,8 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, static int dpaa2_xstats_get_names_by_id( struct rte_eth_dev *dev, - struct rte_eth_xstat_name *xstats_names, const uint64_t *ids, + struct rte_eth_xstat_name *xstats_names, unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa2_xstats_strings); @@ -1423,12 +1792,12 @@ dpaa2_xstats_get_names_by_id( return limit; } -static void +static int dpaa2_dev_stats_reset(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - int32_t retcode; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int retcode; int i; struct dpaa2_queue *dpaa2_q; @@ -1436,7 +1805,7 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -EINVAL; } retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); @@ -1456,33 +1825,42 @@ dpaa2_dev_stats_reset(struct rte_eth_dev *dev) dpaa2_q->tx_pkts = 0; } - return; + return 0; error: DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); - return; + return retcode; }; /* return 0 means link status changed, -1 means not changed */ static int dpaa2_dev_link_update(struct rte_eth_dev *dev, - int wait_to_complete __rte_unused) + int wait_to_complete) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct rte_eth_link link; struct dpni_link_state state = {0}; + uint8_t count; if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); return 0; } - ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); - if (ret < 0) { - DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); - return -1; + for (count = 0; count <= MAX_REPEAT_TIME; count++) { + ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, + &state); + if (ret < 0) { + DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); + return -1; + } + if (state.up == RTE_ETH_LINK_DOWN && + wait_to_complete) + rte_delay_ms(CHECK_INTERVAL); + else + break; } memset(&link, 0, sizeof(struct rte_eth_link)); @@ -1490,9 +1868,9 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, link.link_speed = state.rate; if (state.options & DPNI_LINK_OPT_HALF_DUPLEX) - link.link_duplex 
= ETH_LINK_HALF_DUPLEX; + link.link_duplex = RTE_ETH_LINK_HALF_DUPLEX; else - link.link_duplex = ETH_LINK_FULL_DUPLEX; + link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX; ret = rte_eth_linkstatus_set(dev, &link); if (ret == -1) @@ -1518,7 +1896,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) struct dpni_link_state state = {0}; priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); @@ -1550,6 +1928,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) /* changing tx burst function to start enqueues */ dev->tx_pkt_burst = dpaa2_dev_tx; dev->data->dev_link.link_status = state.up; + dev->data->dev_link.link_speed = state.rate; if (state.up) DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); @@ -1574,7 +1953,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("Device has not yet been configured"); @@ -1628,7 +2007,7 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL || fc_conf == NULL) { DPAA2_PMD_ERR("device not configured"); @@ -1652,9 +2031,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * No TX side flow control (send Pause frame disabled) */ if (!(state.options & DPNI_LINK_OPT_ASYM_PAUSE)) - fc_conf->mode = RTE_FC_FULL; + fc_conf->mode = RTE_ETH_FC_FULL; else - fc_conf->mode = RTE_FC_RX_PAUSE; + fc_conf->mode = RTE_ETH_FC_RX_PAUSE; } else { /* DPNI_LINK_OPT_PAUSE not set * if ASYM_PAUSE set, @@ -1664,9 +2043,9 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Flow control disabled */ if (state.options & DPNI_LINK_OPT_ASYM_PAUSE) - fc_conf->mode = RTE_FC_TX_PAUSE; + fc_conf->mode = RTE_ETH_FC_TX_PAUSE; else - fc_conf->mode = RTE_FC_NONE; + fc_conf->mode = RTE_ETH_FC_NONE; } return ret; @@ -1684,7 +2063,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); @@ -1710,14 +2089,14 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) /* update cfg with fc_conf */ switch (fc_conf->mode) { - case RTE_FC_FULL: + case RTE_ETH_FC_FULL: /* Full flow control; * OPT_PAUSE set, ASYM_PAUSE not set */ cfg.options |= DPNI_LINK_OPT_PAUSE; cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; break; - case RTE_FC_TX_PAUSE: + case RTE_ETH_FC_TX_PAUSE: /* Enable RX flow control * OPT_PAUSE not set; * ASYM_PAUSE set; @@ -1725,7 +2104,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; cfg.options &= ~DPNI_LINK_OPT_PAUSE; break; - case RTE_FC_RX_PAUSE: + case RTE_ETH_FC_RX_PAUSE: /* Enable TX Flow control * OPT_PAUSE set * ASYM_PAUSE set @@ -1733,7 +2112,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) cfg.options |= DPNI_LINK_OPT_PAUSE; cfg.options |= DPNI_LINK_OPT_ASYM_PAUSE; break; - case RTE_FC_NONE: + case RTE_ETH_FC_NONE: /* Disable Flow control * OPT_PAUSE not set * ASYM_PAUSE not set @@ -1763,22 +2142,31 @@ 
dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = data->dev_private; struct rte_eth_conf *eth_conf = &data->dev_conf; - int ret; + int ret, tc_index; PMD_INIT_FUNC_TRACE(); if (rss_conf->rss_hf) { - ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); - if (ret) { - DPAA2_PMD_ERR("Unable to set flow dist"); - return ret; + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf, + tc_index); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow dist on tc%d", + tc_index); + return ret; + } } } else { - ret = dpaa2_remove_flow_dist(dev, 0); - if (ret) { - DPAA2_PMD_ERR("Unable to remove flow dist"); - return ret; + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_remove_flow_dist(dev, tc_index); + if (ret) { + DPAA2_PMD_ERR( + "Unable to remove flow dist on tc%d", + tc_index); + return ret; + } } } eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; @@ -1800,15 +2188,15 @@ dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, int eth_rx_queue_id, - uint16_t dpcon_id, + struct dpaa2_dpcon_dev *dpcon, const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) { struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; uint8_t flow_id = dpaa2_ethq->flow_id; struct dpni_queue cfg; - uint8_t options; + uint8_t options, priority; int ret; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) @@ -1820,11 +2208,14 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, else return -EINVAL; + priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * + (dpcon->num_priorities - 1); + memset(&cfg, 0, sizeof(struct dpni_queue)); options = DPNI_QUEUE_OPT_DEST; cfg.destination.type = DPNI_DEST_DPCON; - cfg.destination.id = dpcon_id; - cfg.destination.priority = queue_conf->ev.priority; + cfg.destination.id = dpcon->dpcon_id; + cfg.destination.priority = priority; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; @@ -1857,7 +2248,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, dpaa2_ethq->tc_index, flow_id, - OPR_OPT_CREATE, &ocfg); + OPR_OPT_CREATE, &ocfg, 0); if (ret) { DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); return ret; @@ -1885,7 +2276,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; uint8_t flow_id = dpaa2_ethq->flow_id; struct dpni_queue cfg; @@ -1904,45 +2295,82 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, return ret; } -static inline int -dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op) +static int +dpaa2_dev_flow_ops_get(struct rte_eth_dev *dev, + const struct rte_flow_ops **ops) { - unsigned int i; + if (!dev) + return -ENODEV; - for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) { - if (dpaa2_supported_filter_ops[i] == filter_op) - return 0; - } - return -ENOTSUP; + *ops = 
&dpaa2_flow_ops;
+	return 0;
+}
+
+static void
+dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_rxq_info *qinfo)
+{
+	struct dpaa2_queue *rxq;
+	struct dpaa2_dev_priv *priv = dev->data->dev_private;
+	struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private;
+	uint16_t max_frame_length;
+
+	rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id];
+
+	qinfo->mp = rxq->mb_pool;
+	qinfo->scattered_rx = dev->data->scattered_rx;
+	qinfo->nb_desc = rxq->nb_desc;
+	if (dpni_get_max_frame_length(dpni, CMD_PRI_LOW, priv->token,
+				&max_frame_length) == 0)
+		qinfo->rx_buf_size = max_frame_length;
+
+	qinfo->conf.rx_free_thresh = 1;
+	qinfo->conf.rx_drop_en = 1;
+	qinfo->conf.rx_deferred_start = 0;
+	qinfo->conf.offloads = rxq->offloads;
+}
+
+static void
+dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+	struct rte_eth_txq_info *qinfo)
+{
+	struct dpaa2_queue *txq;
+
+	txq = dev->data->tx_queues[queue_id];
+
+	qinfo->nb_desc = txq->nb_desc;
+	qinfo->conf.tx_thresh.pthresh = 0;
+	qinfo->conf.tx_thresh.hthresh = 0;
+	qinfo->conf.tx_thresh.wthresh = 0;
+
+	qinfo->conf.tx_free_thresh = 0;
+	qinfo->conf.tx_rs_thresh = 0;
+	qinfo->conf.offloads = txq->offloads;
+	qinfo->conf.tx_deferred_start = 0;
 }
 
 static int
-dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev,
-		    enum rte_filter_type filter_type,
-				 enum rte_filter_op filter_op,
-				 void *arg)
+dpaa2_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *ops)
 {
-	int ret = 0;
+	*(const void **)ops = &dpaa2_tm_ops;
 
-	if (!dev)
-		return -ENODEV;
+	return 0;
+}
 
-	switch (filter_type) {
-	case RTE_ETH_FILTER_GENERIC:
-		if (dpaa2_dev_verify_filter_ops(filter_op) < 0) {
-			ret = -ENOTSUP;
-			break;
+void
+rte_pmd_dpaa2_thread_init(void)
+{
+	int ret;
+
+	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+		ret = dpaa2_affine_qbman_swp();
+		if (ret) {
+			DPAA2_PMD_ERR(
+				"Failed to allocate IO portal, tid: %d\n",
+				rte_gettid());
+			return;
 		}
-		*(const void **)arg = &dpaa2_flow_ops;
-		dpaa2_filter_type |= filter_type;
-		break;
-	default:
-		RTE_LOG(ERR, PMD, "Filter type (%d) not supported",
-			filter_type);
-		ret = -ENOTSUP;
-		break;
 	}
-	return ret;
 }
 
 static struct eth_dev_ops dpaa2_ethdev_ops = {
@@ -1974,8 +2402,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.rx_queue_setup    = dpaa2_dev_rx_queue_setup,
 	.rx_queue_release  = dpaa2_dev_rx_queue_release,
 	.tx_queue_setup    = dpaa2_dev_tx_queue_setup,
-	.tx_queue_release  = dpaa2_dev_tx_queue_release,
-	.rx_queue_count    = dpaa2_dev_rx_queue_count,
+	.rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get,
+	.tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get,
 	.flow_ctrl_get     = dpaa2_flow_ctrl_get,
 	.flow_ctrl_set     = dpaa2_flow_ctrl_set,
 	.mac_addr_add      = dpaa2_dev_add_mac_addr,
@@ -1983,7 +2411,19 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
 	.mac_addr_set         = dpaa2_dev_set_mac_addr,
 	.rss_hash_update      = dpaa2_dev_rss_hash_update,
 	.rss_hash_conf_get    = dpaa2_dev_rss_hash_conf_get,
-	.filter_ctrl          = dpaa2_dev_flow_ctrl,
+	.flow_ops_get         = dpaa2_dev_flow_ops_get,
+	.rxq_info_get         = dpaa2_rxq_info_get,
+	.txq_info_get         = dpaa2_txq_info_get,
+	.tm_ops_get           = dpaa2_tm_ops_get,
+#if defined(RTE_LIBRTE_IEEE1588)
+	.timesync_enable      = dpaa2_timesync_enable,
+	.timesync_disable     = dpaa2_timesync_disable,
+	.timesync_read_time   = dpaa2_timesync_read_time,
+	.timesync_write_time  = dpaa2_timesync_write_time,
+	.timesync_adjust_time = dpaa2_timesync_adjust_time,
+	.timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp,
+	.timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp,
+#endif
 };
 
 /* Populate the mac address from physically available (u-boot/firmware) and/or
@@ -1992,13 +2432,13 @@ static struct eth_dev_ops dpaa2_ethdev_ops = {
  */
 static int
 populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
-		  struct ether_addr *mac_entry)
+		  struct rte_ether_addr *mac_entry)
 {
 	int ret;
-	struct ether_addr phy_mac, prime_mac;
+	struct rte_ether_addr phy_mac, prime_mac;
 
-	memset(&phy_mac, 0, sizeof(struct ether_addr));
-	memset(&prime_mac, 0, sizeof(struct ether_addr));
+	memset(&phy_mac, 0, sizeof(struct rte_ether_addr));
+	memset(&prime_mac, 0, sizeof(struct rte_ether_addr));
 
 	/* Get the physical device MAC address */
 	ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token,
@@ -2021,9 +2461,9 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
 	 * If empty_mac(phy), return prime.
 	 * if both are empty, create random MAC, set as prime and return
 	 */
-	if (!is_zero_ether_addr(&phy_mac)) {
+	if (!rte_is_zero_ether_addr(&phy_mac)) {
 		/* If the addresses are not same, overwrite prime */
-		if (!is_same_ether_addr(&phy_mac, &prime_mac)) {
+		if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) {
 			ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
 							priv->token,
 							phy_mac.addr_bytes);
@@ -2032,11 +2472,12 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
 					ret);
 				goto cleanup;
 			}
-			memcpy(&prime_mac, &phy_mac, sizeof(struct ether_addr));
+			memcpy(&prime_mac, &phy_mac,
+				sizeof(struct rte_ether_addr));
 		}
-	} else if (is_zero_ether_addr(&prime_mac)) {
+	} else if (rte_is_zero_ether_addr(&prime_mac)) {
 		/* In case phys and prime, both are zero, create random MAC */
-		eth_random_addr(prime_mac.addr_bytes);
+		rte_eth_random_addr(prime_mac.addr_bytes);
 		ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW,
 						priv->token,
 						prime_mac.addr_bytes);
@@ -2047,7 +2488,7 @@ populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv,
 	}
 
 	/* prime_mac the final MAC address */
-	memcpy(mac_entry, &prime_mac, sizeof(struct ether_addr));
+	memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr));
 
 	return 0;
 cleanup:
@@ -2104,14 +2545,26 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
+	if (!dpni_dev) {
+		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
+		return -1;
+	}
+	dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX);
+	eth_dev->process_private = (void *)dpni_dev;
+
 	/* For secondary processes, the primary has done all the work */
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
 		/* In case of secondary, only burst and ops API need to be
 		 * plugged.
 		 */
 		eth_dev->dev_ops = &dpaa2_ethdev_ops;
+		eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count;
 		if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
 			eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+		else if (dpaa2_get_devargs(dev->devargs,
+					DRIVER_NO_PREFETCH_MODE))
+			eth_dev->rx_pkt_burst = dpaa2_dev_rx;
 		else
 			eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
 		eth_dev->tx_pkt_burst = dpaa2_dev_tx;
@@ -2121,14 +2574,6 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device);
 
 	hw_id = dpaa2_dev->object_id;
-
-	dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0);
-	if (!dpni_dev) {
-		DPAA2_PMD_ERR("Memory allocation failed for dpni device");
-		return -1;
-	}
-
-	dpni_dev->regs = rte_mcp_ptr_list[0];
 	ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token);
 	if (ret) {
 		DPAA2_PMD_ERR(
@@ -2155,6 +2600,18 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	priv->num_rx_tc = attr.num_rx_tcs;
+	priv->qos_entries = attr.qos_entries;
+	priv->fs_entries = attr.fs_entries;
+	priv->dist_queues = attr.num_queues;
+
+	/* only if the custom CG is enabled */
+	if (attr.options & DPNI_OPT_CUSTOM_CG)
+		priv->max_cgs = attr.num_cgs;
+	else
+		priv->max_cgs = 0;
+
+	for (i = 0; i < priv->max_cgs; i++)
+		priv->cgid_in_use[i] = 0;
 
 	for (i = 0; i < attr.num_rx_tcs; i++)
 		priv->nb_rx_queues += attr.num_queues;
@@ -2162,9 +2619,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	/* Using number of TX queues as number of TX TCs */
 	priv->nb_tx_queues = attr.num_tx_tcs;
 
-	DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d",
+	DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d",
 			priv->num_rx_tc, priv->nb_rx_queues,
-			priv->nb_tx_queues);
+			priv->nb_tx_queues, priv->max_cgs);
 
 	priv->hw = dpni_dev;
 	priv->hw_id = hw_id;
@@ -2172,6 +2629,20 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	priv->max_mac_filters = attr.mac_filter_entries;
 	priv->max_vlan_filters = attr.vlan_filter_entries;
 	priv->flags = 0;
+#if defined(RTE_LIBRTE_IEEE1588)
+	printf("DPDK IEEE1588 is enabled\n");
+	priv->flags |= DPAA2_TX_CONF_ENABLE;
+#endif
+	/* Used with ``fslmc:dpni.1,drv_tx_conf=1`` */
+	if (dpaa2_get_devargs(dev->devargs, DRIVER_TX_CONF)) {
+		priv->flags |= DPAA2_TX_CONF_ENABLE;
+		DPAA2_PMD_INFO("TX_CONF Enabled");
+	}
+
+	if (dpaa2_get_devargs(dev->devargs, DRIVER_ERROR_QUEUE)) {
+		dpaa2_enable_err_queue = 1;
+		DPAA2_PMD_INFO("Enable error queue");
+	}
 
 	/* Allocate memory for hardware structure for queues */
 	ret = dpaa2_alloc_rx_tx_queues(eth_dev);
@@ -2185,11 +2656,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	 * can add MAC entries when rte_eth_dev_mac_addr_add is called.
 	 */
 	eth_dev->data->mac_addrs = rte_zmalloc("dpni",
-		ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
+		RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0);
 	if (eth_dev->data->mac_addrs == NULL) {
 		DPAA2_PMD_ERR(
 		   "Failed to allocate %d bytes needed to store MAC addresses",
-			ETHER_ADDR_LEN * attr.mac_filter_entries);
+			RTE_ETHER_ADDR_LEN * attr.mac_filter_entries);
 		ret = -ENOMEM;
 		goto init_err;
 	}
@@ -2204,7 +2675,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* ... tx buffer layout ... */
 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
+				 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+		layout.pass_timestamp = true;
+	} else {
+		layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	}
 	layout.pass_frame_status = 1;
 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
 				     DPNI_QUEUE_TX, &layout);
@@ -2215,7 +2692,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 
 	/* ... tx-conf and error buffer layout ... */
 	memset(&layout, 0, sizeof(struct dpni_buffer_layout));
-	layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
+	if (priv->flags & DPAA2_TX_CONF_ENABLE) {
+		layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
+		layout.pass_timestamp = true;
+	}
+	layout.options |= DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
 	layout.pass_frame_status = 1;
 	ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token,
 				     DPNI_QUEUE_TX_CONFIRM, &layout);
@@ -2230,85 +2711,94 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
 	if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
 		eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
 		DPAA2_PMD_INFO("Loopback mode");
+	} else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
+		eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+		DPAA2_PMD_INFO("No Prefetch mode");
 	} else {
 		eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
 	}
 	eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
 	/*Init fields w.r.t. classficaition*/
-	memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg));
+	memset(&priv->extract.qos_key_extract, 0,
+		sizeof(struct dpaa2_key_extract));
 	priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64);
 	if (!priv->extract.qos_extract_param) {
 		DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow "
			    " classificaiton ", ret);
 		goto init_err;
 	}
+	priv->extract.qos_key_extract.key_info.ipv4_src_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv4_dst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv6_src_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+	priv->extract.qos_key_extract.key_info.ipv6_dst_offset =
+		IP_ADDRESS_OFFSET_INVALID;
+
 	for (i = 0; i < MAX_TCS; i++) {
-		memset(&priv->extract.fs_key_cfg[i], 0,
-			sizeof(struct dpkg_profile_cfg));
-		priv->extract.fs_extract_param[i] =
+		memset(&priv->extract.tc_key_extract[i], 0,
+			sizeof(struct dpaa2_key_extract));
+		priv->extract.tc_extract_param[i] =
 			(size_t)rte_malloc(NULL, 256, 64);
-		if (!priv->extract.fs_extract_param[i]) {
+		if (!priv->extract.tc_extract_param[i]) {
 			DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton",
 				     ret);
 			goto init_err;
 		}
+		priv->extract.tc_key_extract[i].key_info.ipv4_src_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv6_src_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+		priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset =
+			IP_ADDRESS_OFFSET_INVALID;
+	}
+
+	ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token,
+					RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN
+					+ VLAN_TAG_SIZE);
+	if (ret) {
+		DPAA2_PMD_ERR("Unable to set mtu. check config");
+		goto init_err;
 	}
 
+	/*TODO To enable soft parser support DPAA2 driver needs to integrate
+	 * with external entity to receive byte code for software sequence
+	 * and same will be offload to the H/W using MC interface.
+	 * Currently it is assumed that DPAA2 driver has byte code by some
+	 * mean and same if offloaded to H/W.
+	 */
+	if (getenv("DPAA2_ENABLE_SOFT_PARSER")) {
+		WRIOP_SS_INITIALIZER(priv);
+		ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS);
+		if (ret < 0) {
+			DPAA2_PMD_ERR(" Error(%d) in loading softparser\n",
+				      ret);
+			return ret;
+		}
+
+		ret = dpaa2_eth_enable_wriop_soft_parser(priv,
+							 DPNI_SS_INGRESS);
+		if (ret < 0) {
+			DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n",
+				      ret);
+			return ret;
		}
+	}
 	RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
 	return 0;
 init_err:
-	dpaa2_dev_uninit(eth_dev);
+	dpaa2_dev_close(eth_dev);
+
 	return ret;
 }
 
-static int
-dpaa2_dev_uninit(struct rte_eth_dev *eth_dev)
+int dpaa2_dev_is_dpaa2(struct rte_eth_dev *dev)
 {
-	struct dpaa2_dev_priv *priv = eth_dev->data->dev_private;
-	struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw;
-	int i, ret;
-
-	PMD_INIT_FUNC_TRACE();
-
-	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return 0;
-
-	if (!dpni) {
-		DPAA2_PMD_WARN("Already closed or not started");
-		return -1;
-	}
-
-	dpaa2_dev_close(eth_dev);
-
-	dpaa2_free_rx_tx_queues(eth_dev);
-
-	/* Close the device at underlying layer*/
-	ret = dpni_close(dpni, CMD_PRI_LOW, priv->token);
-	if (ret) {
-		DPAA2_PMD_ERR(
-			     "Failure closing dpni device with err code %d",
-			     ret);
-	}
-
-	/* Free the allocated memory for ethernet private data and dpni*/
-	priv->hw = NULL;
-	rte_free(dpni);
-
-	for (i = 0; i < MAX_TCS; i++) {
-		if (priv->extract.fs_extract_param[i])
-			rte_free((void *)(size_t)priv->extract.fs_extract_param[i]);
-	}
-
-	if (priv->extract.qos_extract_param)
-		rte_free((void *)(size_t)priv->extract.qos_extract_param);
-
-	eth_dev->dev_ops = NULL;
-	eth_dev->rx_pkt_burst = NULL;
-	eth_dev->tx_pkt_burst = NULL;
-
-	DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name);
-	return 0;
+	return dev->device->driver == &rte_dpaa2_pmd.driver;
 }
 
 static int
@@ -2316,26 +2806,41 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 		struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_eth_dev *eth_dev;
+	struct dpaa2_dev_priv *dev_priv;
 	int diag;
 
+	if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) >
+		RTE_PKTMBUF_HEADROOM) {
+		DPAA2_PMD_ERR(
+		"RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)",
+		RTE_PKTMBUF_HEADROOM,
+		DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE);
+
+		return -1;
+	}
+
 	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
 		eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name);
 		if (!eth_dev)
 			return -ENODEV;
-		eth_dev->data->dev_private = rte_zmalloc(
-						"ethdev private structure",
-						sizeof(struct dpaa2_dev_priv),
-						RTE_CACHE_LINE_SIZE);
-		if (eth_dev->data->dev_private == NULL) {
+		dev_priv = rte_zmalloc("ethdev private structure",
+				       sizeof(struct dpaa2_dev_priv),
+				       RTE_CACHE_LINE_SIZE);
+		if (dev_priv == NULL) {
 			DPAA2_PMD_CRIT(
 				"Unable to allocate memory for private data");
 			rte_eth_dev_release_port(eth_dev);
 			return -ENOMEM;
 		}
+		eth_dev->data->dev_private = (void *)dev_priv;
+		/* Store a pointer to eth_dev in dev_private */
+		dev_priv->eth_dev = eth_dev;
 	} else {
 		eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name);
-		if (!eth_dev)
+		if (!eth_dev) {
+			DPAA2_PMD_DEBUG("returning enodev");
 			return -ENODEV;
+		}
 	}
 
 	eth_dev->device = &dpaa2_dev->device;
@@ -2346,6 +2851,8 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 	if (dpaa2_drv->drv_flags & RTE_DPAA2_DRV_INTR_LSC)
 		eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
 
+	eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
 	/* Invoke PMD device initialization function */
 	diag = dpaa2_dev_init(eth_dev);
 	if (diag == 0) {
@@ -2361,13 +2868,13 @@ static int
 rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
 {
 	struct rte_eth_dev *eth_dev;
+	int ret;
 
 	eth_dev = dpaa2_dev->eth_dev;
-	dpaa2_dev_uninit(eth_dev);
-
-	rte_eth_dev_release_port(eth_dev);
+	dpaa2_dev_close(eth_dev);
+	ret = rte_eth_dev_release_port(eth_dev);
 
-	return 0;
+	return ret;
 }
 
 static struct rte_dpaa2_driver rte_dpaa2_pmd = {
@@ -2377,12 +2884,10 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
 	.remove = rte_dpaa2_remove,
 };
 
-RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
-		DRIVER_LOOPBACK_MODE "=<int>");
-RTE_INIT(dpaa2_pmd_init_log)
-{
-	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
-	if (dpaa2_logtype_pmd >= 0)
-		rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE);
-}
+RTE_PMD_REGISTER_DPAA2(NET_DPAA2_PMD_DRIVER_NAME, rte_dpaa2_pmd);
+RTE_PMD_REGISTER_PARAM_STRING(NET_DPAA2_PMD_DRIVER_NAME,
+		DRIVER_LOOPBACK_MODE "=<int> "
+		DRIVER_NO_PREFETCH_MODE "=<int>"
+		DRIVER_TX_CONF "=<int>"
+		DRIVER_ERROR_QUEUE "=<int>");
+RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_pmd, NOTICE);