X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=2cde55e7ccc4c5fb9e0ee9a37eb9b25aa44a00db;hb=0d20cda8a935d0df3c6e483422151705200907d0;hp=146566ecbb51377112bc6fd19d1c6dd106a8ba2f;hpb=876b2c902ea0480f3bd0f4f9361e8e0ad9745ff3;p=dpdk.git

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 146566ecbb..2cde55e7cc 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -17,6 +17,7 @@
 #include 
 #include 
 #include 
+#include 
 #include "dpaa2_pmd_logs.h"
 #include 
@@ -25,8 +26,46 @@
 #include 
 #include 
 #include "dpaa2_ethdev.h"
+#include "dpaa2_sparser.h"
 #include 

+#define DRIVER_LOOPBACK_MODE "drv_loopback"
+#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
+
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+	DEV_RX_OFFLOAD_CHECKSUM |
+	DEV_RX_OFFLOAD_SCTP_CKSUM |
+	DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+	DEV_RX_OFFLOAD_OUTER_UDP_CKSUM |
+	DEV_RX_OFFLOAD_VLAN_STRIP |
+	DEV_RX_OFFLOAD_VLAN_FILTER |
+	DEV_RX_OFFLOAD_JUMBO_FRAME |
+	DEV_RX_OFFLOAD_TIMESTAMP;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+	DEV_RX_OFFLOAD_RSS_HASH |
+	DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+	DEV_TX_OFFLOAD_VLAN_INSERT |
+	DEV_TX_OFFLOAD_IPV4_CKSUM |
+	DEV_TX_OFFLOAD_UDP_CKSUM |
+	DEV_TX_OFFLOAD_TCP_CKSUM |
+	DEV_TX_OFFLOAD_SCTP_CKSUM |
+	DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+	DEV_TX_OFFLOAD_MT_LOCKFREE |
+	DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+	DEV_TX_OFFLOAD_MULTI_SEGS;
+
+/* enable timestamp in mbuf */
+enum pmd_dpaa2_ts dpaa2_enable_ts;
+
 struct rte_dpaa2_xstats_name_off {
 	char name[RTE_ETH_XSTATS_NAME_SIZE];
 	uint8_t page_id; /* dpni statistics page id */
@@ -47,6 +86,16 @@ static const struct rte_dpaa2_xstats_name_off dpaa2_xstats_strings[] = {
 	{"ingress_nobuffer_discards", 2, 2},
 	{"egress_discarded_frames", 2, 3},
 	{"egress_confirmed_frames", 2, 4},
+	{"cgr_reject_frames", 4, 0},
+	{"cgr_reject_bytes", 4, 1},
+};
+
+static const enum rte_filter_op dpaa2_supported_filter_ops[] = {
+	RTE_ETH_FILTER_ADD,
+	RTE_ETH_FILTER_DELETE,
+	RTE_ETH_FILTER_UPDATE,
+	RTE_ETH_FILTER_FLUSH,
+	RTE_ETH_FILTER_GET
 };

 static struct rte_dpaa2_driver rte_dpaa2_pmd;
@@ -59,12 +108,18 @@ static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);

 int dpaa2_logtype_pmd;

+void
+rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable)
+{
+	dpaa2_enable_ts = enable;
+}
+
 static int
 dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 {
 	int ret;
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;

 	PMD_INIT_FUNC_TRACE();

@@ -74,8 +129,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
 	}

 	if (on)
-		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW,
-				priv->token, vlan_id);
+		ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token,
+				vlan_id, 0, 0, 0);
 	else
 		ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW,
 				priv->token, vlan_id);
@@ -91,7 +146,7 @@ static int
 dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct dpaa2_dev_priv *priv = dev->data->dev_private;
-	struct fsl_mc_io *dpni = priv->hw;
+	struct fsl_mc_io *dpni = dev->process_private;
 	int ret;

 	PMD_INIT_FUNC_TRACE();
@@ -103,7 +158,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			goto next_mask;
 		}

-		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+		if
(dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_FILTER) ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW, priv->token, true); else @@ -114,21 +170,59 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) } next_mask: if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_VLAN_EXTEND) DPAA2_PMD_INFO("VLAN extend offload not supported"); } return 0; } +static int +dpaa2_vlan_tpid_set(struct rte_eth_dev *dev, + enum rte_vlan_type vlan_type __rte_unused, + uint16_t tpid) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct fsl_mc_io *dpni = dev->process_private; + int ret = -ENOTSUP; + + PMD_INIT_FUNC_TRACE(); + + /* nothing to be done for standard vlan tpids */ + if (tpid == 0x8100 || tpid == 0x88A8) + return 0; + + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + if (ret < 0) + DPAA2_PMD_INFO("Unable to set vlan tpid = %d", ret); + /* if already configured tpids, remove them first */ + if (ret == -EBUSY) { + struct dpni_custom_tpid_cfg tpid_list = {0}; + + ret = dpni_get_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, &tpid_list); + if (ret < 0) + goto fail; + ret = dpni_remove_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid_list.tpid1); + if (ret < 0) + goto fail; + ret = dpni_add_custom_tpid(dpni, CMD_PRI_LOW, + priv->token, tpid); + } +fail: + return ret; +} + static int dpaa2_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size) { int ret; - struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; struct mc_soc_version mc_plat_info = {0}; struct mc_version mc_ver_info = {0}; @@ -154,7 +248,7 @@ dpaa2_fw_version_get(struct rte_eth_dev *dev, return 0; } -static void +static int dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct dpaa2_dev_priv *priv = dev->data->dev_private; @@ -168,20 +262,20 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues; dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues; - dev_info->rx_offload_capa = - DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM | - DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM; - dev_info->tx_offload_capa = - DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM | - DEV_TX_OFFLOAD_SCTP_CKSUM | - DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | ETH_LINK_SPEED_10G; + + dev_info->max_hash_mac_addrs = 0; + dev_info->max_vfs = 0; + dev_info->max_vmdq_pools = ETH_16_POOLS; + dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; + + return 0; } static int @@ -190,6 +284,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) struct dpaa2_dev_priv *priv = dev->data->dev_private; uint16_t dist_idx; uint32_t vq_id; + uint8_t num_rxqueue_per_tc; struct dpaa2_queue *mc_q, *mcq; uint32_t tot_queues; int i; @@ -197,7 +292,11 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); - tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; + num_rxqueue_per_tc = (priv->nb_rx_queues / priv->num_rx_tc); + if (priv->tx_conf_en) + tot_queues = priv->nb_rx_queues + 2 * 
priv->nb_tx_queues; + else + tot_queues = priv->nb_rx_queues + priv->nb_tx_queues; mc_q = rte_malloc(NULL, sizeof(struct dpaa2_queue) * tot_queues, RTE_CACHE_LINE_SIZE); if (!mc_q) { @@ -206,7 +305,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) } for (i = 0; i < priv->nb_rx_queues; i++) { - mc_q->dev = dev; + mc_q->eth_data = dev->data; priv->rx_vq[i] = mc_q++; dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; dpaa2_q->q_storage = rte_malloc("dq_storage", @@ -222,7 +321,7 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) } for (i = 0; i < priv->nb_tx_queues; i++) { - mc_q->dev = dev; + mc_q->eth_data = dev->data; mc_q->flow_id = 0xffff; priv->tx_vq[i] = mc_q++; dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; @@ -232,15 +331,45 @@ dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) goto fail_tx; } + if (priv->tx_conf_en) { + /*Setup tx confirmation queues*/ + for (i = 0; i < priv->nb_tx_queues; i++) { + mc_q->eth_data = dev->data; + mc_q->tc_index = i; + mc_q->flow_id = 0; + priv->tx_conf_vq[i] = mc_q++; + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + dpaa2_q->q_storage = + rte_malloc("dq_storage", + sizeof(struct queue_storage_info_t), + RTE_CACHE_LINE_SIZE); + if (!dpaa2_q->q_storage) + goto fail_tx_conf; + + memset(dpaa2_q->q_storage, 0, + sizeof(struct queue_storage_info_t)); + if (dpaa2_alloc_dq_storage(dpaa2_q->q_storage)) + goto fail_tx_conf; + } + } + vq_id = 0; for (dist_idx = 0; dist_idx < priv->nb_rx_queues; dist_idx++) { mcq = (struct dpaa2_queue *)priv->rx_vq[vq_id]; - mcq->tc_index = DPAA2_DEF_TC; - mcq->flow_id = dist_idx; + mcq->tc_index = dist_idx / num_rxqueue_per_tc; + mcq->flow_id = dist_idx % num_rxqueue_per_tc; vq_id++; } return 0; +fail_tx_conf: + i -= 1; + while (i >= 0) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + priv->tx_conf_vq[i--] = NULL; + } + i = priv->nb_tx_queues; fail_tx: i -= 1; while (i >= 0) { @@ -262,26 +391,88 @@ fail: return -1; } +static void +dpaa2_free_rx_tx_queues(struct rte_eth_dev *dev) +{ + struct dpaa2_dev_priv *priv = dev->data->dev_private; + struct dpaa2_queue *dpaa2_q; + int i; + + PMD_INIT_FUNC_TRACE(); + + /* Queue allocation base */ + if (priv->rx_vq[0]) { + /* cleaning up queue storage */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q->q_storage) + rte_free(dpaa2_q->q_storage); + } + /* cleanup tx queue cscn */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + rte_free(dpaa2_q->cscn); + } + if (priv->tx_conf_en) { + /* cleanup tx conf queue storage */ + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *) + priv->tx_conf_vq[i]; + rte_free(dpaa2_q->q_storage); + } + } + /*free memory for all queues (RX+TX) */ + rte_free(priv->rx_vq[0]); + priv->rx_vq[0] = NULL; + } +} + static int dpaa2_eth_dev_configure(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = priv->hw; + struct fsl_mc_io *dpni = dev->process_private; struct rte_eth_conf *eth_conf = &dev->data->dev_conf; - int rx_ip_csum_offload = false; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; + int rx_l3_csum_offload = false; + int rx_l4_csum_offload = false; + int tx_l3_csum_offload = false; + int tx_l4_csum_offload = false; int ret; PMD_INIT_FUNC_TRACE(); - if (eth_conf->rxmode.jumbo_frame == 1) { + /* Rx offloads which are enabled by default */ + if (dev_rx_offloads_nodis & 
~rx_offloads) { + DPAA2_PMD_INFO( + "Some of rx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads which are enabled by default */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA2_PMD_INFO( + "Some of tx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) { ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, - priv->token, eth_conf->rxmode.max_rx_pkt_len); + priv->token, eth_conf->rxmode.max_rx_pkt_len + - RTE_ETHER_CRC_LEN); if (ret) { DPAA2_PMD_ERR( "Unable to set mtu. check config"); return ret; } + dev->data->mtu = + dev->data->dev_conf.rxmode.max_rx_pkt_len - + RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - + VLAN_TAG_SIZE; } else { return -1; } @@ -297,32 +488,48 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) } } - if (eth_conf->rxmode.hw_ip_checksum) - rx_ip_csum_offload = true; + if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) + rx_l3_csum_offload = true; + + if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM) || + (rx_offloads & DEV_RX_OFFLOAD_SCTP_CKSUM)) + rx_l4_csum_offload = true; ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload); if (ret) { DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload); + DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload); if (ret) { DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret); return ret; } + if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) + dpaa2_enable_ts = true; + + if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) + tx_l3_csum_offload = true; + + if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) || + (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM)) + tx_l4_csum_offload = true; + ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L3_CSUM, true); + DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload); if (ret) { DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret); return ret; } ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token, - DPNI_OFF_TX_L4_CSUM, true); + DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload); if (ret) { DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret); return ret; @@ -343,7 +550,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) } } - if (eth_conf->rxmode.hw_vlan_filter) + if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); /* update the current status */ @@ -358,19 +565,19 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) static int dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, - uint16_t nb_rx_desc __rte_unused, + uint16_t nb_rx_desc, unsigned int socket_id __rte_unused, const struct rte_eth_rxconf *rx_conf __rte_unused, struct rte_mempool *mb_pool) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_q; struct dpni_queue cfg; uint8_t options = 0; uint8_t flow_id; uint32_t bpid; - int ret; + int i, ret; PMD_INIT_FUNC_TRACE(); @@ -386,14 +593,31 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, } dpaa2_q = (struct dpaa2_queue 
*)priv->rx_vq[rx_queue_id]; dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ + dpaa2_q->bp_array = rte_dpaa2_bpid_info; /*Get the flow id from given VQ id*/ - flow_id = rx_queue_id % priv->nb_rx_queues; + flow_id = dpaa2_q->flow_id; memset(&cfg, 0, sizeof(struct dpni_queue)); options = options | DPNI_QUEUE_OPT_USER_CTX; cfg.user_context = (size_t)(dpaa2_q); + /* check if a private cgr available. */ + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + priv->cgid_in_use[i] = 1; + break; + } + } + + if (i < priv->max_cgs) { + options |= DPNI_QUEUE_OPT_SET_CGID; + cfg.cgid = i; + dpaa2_q->cgid = cfg.cgid; + } else { + dpaa2_q->cgid = 0xff; + } + /*if ls2088 or rev2 device, enable the stashing */ if ((dpaa2_svr_family & 0xffff0000) != SVR_LS2080A) { @@ -422,15 +646,56 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, struct dpni_taildrop taildrop; taildrop.enable = 1; - /*enabling per rx queue congestion control */ - taildrop.threshold = CONG_THRESHOLD_RX_Q; - taildrop.units = DPNI_CONGESTION_UNIT_BYTES; - taildrop.oal = CONG_RX_OAL; - DPAA2_PMD_DEBUG("Enabling Early Drop on queue = %d", - rx_queue_id); - ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + + /* Private CGR will use tail drop length as nb_rx_desc. + * for rest cases we can use standard byte based tail drop. + * There is no HW restriction, but number of CGRs are limited, + * hence this restriction is placed. + */ + if (dpaa2_q->cgid != 0xff) { + /*enabling per rx queue congestion control */ + taildrop.threshold = nb_rx_desc; + taildrop.units = DPNI_CONGESTION_UNIT_FRAMES; + taildrop.oal = 0; + DPAA2_PMD_DEBUG("Enabling CG Tail Drop on queue = %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, + flow_id, &taildrop); + } else { + /*enabling per rx queue congestion control */ + taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q; + taildrop.units = DPNI_CONGESTION_UNIT_BYTES; + taildrop.oal = CONG_RX_OAL; + DPAA2_PMD_DEBUG("Enabling Byte based Drop on queue= %d", + rx_queue_id); + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_QUEUE, DPNI_QUEUE_RX, + dpaa2_q->tc_index, flow_id, + &taildrop); + } + if (ret) { + DPAA2_PMD_ERR("Error in setting taildrop. err=(%d)", + ret); + return -1; + } + } else { /* Disable tail Drop */ + struct dpni_taildrop taildrop = {0}; + DPAA2_PMD_INFO("Tail drop is disabled on queue"); + + taildrop.enable = 0; + if (dpaa2_q->cgid != 0xff) { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, + DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, + dpaa2_q->tc_index, + flow_id, &taildrop); + } else { + ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_QUEUE, DPNI_QUEUE_RX, dpaa2_q->tc_index, flow_id, &taildrop); + } if (ret) { DPAA2_PMD_ERR("Error in setting taildrop. 
err=(%d)", ret); @@ -452,10 +717,13 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, struct dpaa2_dev_priv *priv = dev->data->dev_private; struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) priv->tx_vq[tx_queue_id]; - struct fsl_mc_io *dpni = priv->hw; + struct dpaa2_queue *dpaa2_tx_conf_q = (struct dpaa2_queue *) + priv->tx_conf_vq[tx_queue_id]; + struct fsl_mc_io *dpni = dev->process_private; struct dpni_queue tx_conf_cfg; struct dpni_queue tx_flow_cfg; uint8_t options = 0, flow_id; + struct dpni_queue_id qid; uint32_t tc_id; int ret; @@ -486,9 +754,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, if (tx_queue_id == 0) { /*Set tx-conf and error configuration*/ - ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, - priv->token, - DPNI_CONF_DISABLE); + if (priv->tx_conf_en) + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_AFFINE); + else + ret = dpni_set_tx_confirmation_mode(dpni, CMD_PRI_LOW, + priv->token, + DPNI_CONF_DISABLE); if (ret) { DPAA2_PMD_ERR("Error in set tx conf mode settings: " "err=%d", ret); @@ -497,8 +770,17 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, } dpaa2_q->tc_index = tc_id; + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX, dpaa2_q->tc_index, + dpaa2_q->flow_id, &tx_flow_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_q->fqid = qid.fqid; + if (!(priv->flags & DPAA2_TX_CGR_OFF)) { - struct dpni_congestion_notification_cfg cong_notif_cfg; + struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; @@ -507,12 +789,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, */ cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; cong_notif_cfg.message_ctx = 0; - cong_notif_cfg.message_iova = (size_t)dpaa2_q->cscn; + cong_notif_cfg.message_iova = + (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); cong_notif_cfg.dest_cfg.dest_type = DPNI_DEST_NONE; cong_notif_cfg.notification_mode = DPNI_CONG_OPT_WRITE_MEM_ON_ENTER | DPNI_CONG_OPT_WRITE_MEM_ON_EXIT | DPNI_CONG_OPT_COHERENT_WRITE; + cong_notif_cfg.cg_point = DPNI_CP_QUEUE; ret = dpni_set_congestion_notification(dpni, CMD_PRI_LOW, priv->token, @@ -526,14 +810,63 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, return -ret; } } + dpaa2_q->cb_eqresp_free = dpaa2_dev_free_eqresp_buf; dev->data->tx_queues[tx_queue_id] = dpaa2_q; + + if (priv->tx_conf_en) { + dpaa2_q->tx_conf_queue = dpaa2_tx_conf_q; + options = options | DPNI_QUEUE_OPT_USER_CTX; + tx_conf_cfg.user_context = (size_t)(dpaa2_q); + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, options, &tx_conf_cfg); + if (ret) { + DPAA2_PMD_ERR("Error in setting the tx conf flow: " + "tc_index=%d, flow=%d err=%d", + dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, ret); + return -1; + } + + ret = dpni_get_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_TX_CONFIRM, dpaa2_tx_conf_q->tc_index, + dpaa2_tx_conf_q->flow_id, &tx_conf_cfg, &qid); + if (ret) { + DPAA2_PMD_ERR("Error in getting LFQID err=%d", ret); + return -1; + } + dpaa2_tx_conf_q->fqid = qid.fqid; + } return 0; } static void dpaa2_dev_rx_queue_release(void *q __rte_unused) { + struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)q; + struct dpaa2_dev_priv *priv = dpaa2_q->eth_data->dev_private; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)priv->eth_dev->process_private; + 
uint8_t options = 0; + int ret; + struct dpni_queue cfg; + + memset(&cfg, 0, sizeof(struct dpni_queue)); PMD_INIT_FUNC_TRACE(); + if (dpaa2_q->cgid != 0xff) { + options = DPNI_QUEUE_OPT_CLEAR_CGID; + cfg.cgid = dpaa2_q->cgid; + + ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, + DPNI_QUEUE_RX, + dpaa2_q->tc_index, dpaa2_q->flow_id, + options, &cfg); + if (ret) + DPAA2_PMD_ERR("Unable to clear CGR from q=%u err=%d", + dpaa2_q->fqid, ret); + priv->cgid_in_use[dpaa2_q->cgid] = 0; + dpaa2_q->cgid = 0xff; + } } static void @@ -590,7 +923,9 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev) RTE_PTYPE_UNKNOWN }; - if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx) + if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx || + dev->rx_pkt_burst == dpaa2_dev_rx || + dev->rx_pkt_burst == dpaa2_dev_loopback_rx) return ptypes; return NULL; } @@ -609,7 +944,7 @@ dpaa2_interrupt_handler(void *param) { struct rte_eth_dev *dev = param; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret; int irq_index = DPNI_IRQ_INDEX; unsigned int status = 0, clear = 0; @@ -648,7 +983,7 @@ dpaa2_eth_setup_irqs(struct rte_eth_dev *dev, int enable) { int err = 0; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int irq_index = DPNI_IRQ_INDEX; unsigned int mask = DPNI_IRQ_EVENT_LINK_CHANGED; @@ -678,7 +1013,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) struct rte_dpaa2_device *dpaa2_dev; struct rte_eth_dev_data *data = dev->data; struct dpaa2_dev_priv *priv = data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpni_queue cfg; struct dpni_error_cfg err_cfg; uint16_t qdid; @@ -725,6 +1060,7 @@ dpaa2_dev_start(struct rte_eth_dev *dev) /*checksum errors, send them to normal path and set it in annotation */ err_cfg.errors = DPNI_ERROR_L3CE | DPNI_ERROR_L4CE; + err_cfg.errors |= DPNI_ERROR_PHE; err_cfg.error_action = DPNI_ERROR_ACTION_CONTINUE; err_cfg.set_frame_annotation = true; @@ -755,6 +1091,10 @@ dpaa2_dev_start(struct rte_eth_dev *dev) dpaa2_eth_setup_irqs(dev, 1); } + /* Change the tx burst function if ordered queues are used */ + if (priv->en_ordered) + dev->tx_pkt_burst = dpaa2_dev_tx_ordered; + return 0; } @@ -766,7 +1106,7 @@ static void dpaa2_dev_stop(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int ret; struct rte_eth_link link; struct rte_intr_handle *intr_handle = dev->intr_handle; @@ -805,22 +1145,14 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) static void dpaa2_dev_close(struct rte_eth_dev *dev) { - struct rte_eth_dev_data *data = dev->data; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - int i, ret; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int ret; struct rte_eth_link link; - struct dpaa2_queue *dpaa2_q; PMD_INIT_FUNC_TRACE(); - for (i = 0; i < data->nb_tx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)data->tx_queues[i]; - if (!dpaa2_q->cscn) { - rte_free(dpaa2_q->cscn); - dpaa2_q->cscn = NULL; - } - } + dpaa2_flow_clean(dev); /* Clean the device first */ ret = dpni_reset(dpni, CMD_PRI_LOW, 
priv->token); @@ -833,19 +1165,19 @@ dpaa2_dev_close(struct rte_eth_dev *dev) rte_eth_linkstatus_set(dev, &link); } -static void +static int dpaa2_dev_promiscuous_enable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); @@ -855,21 +1187,23 @@ dpaa2_dev_promiscuous_enable( ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) DPAA2_PMD_ERR("Unable to enable M promisc mode %d", ret); + + return ret; } -static void +static int dpaa2_dev_promiscuous_disable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_unicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); @@ -883,49 +1217,55 @@ dpaa2_dev_promiscuous_disable( DPAA2_PMD_ERR("Unable to disable M promisc mode %d", ret); } + + return ret; } -static void +static int dpaa2_dev_allmulticast_enable( struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, true); if (ret < 0) DPAA2_PMD_ERR("Unable to enable multicast mode %d", ret); + + return ret; } -static void +static int dpaa2_dev_allmulticast_disable(struct rte_eth_dev *dev) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -ENODEV; } /* must remain on for all promiscuous */ if (dev->data->promiscuous == 1) - return; + return 0; ret = dpni_set_multicast_promisc(dpni, CMD_PRI_LOW, priv->token, false); if (ret < 0) DPAA2_PMD_ERR("Unable to disable multicast mode %d", ret); + + return ret; } static int @@ -933,8 +1273,8 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; PMD_INIT_FUNC_TRACE(); @@ -945,13 +1285,15 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } /* check that mtu is within the allowed range */ - if ((mtu < ETHER_MIN_MTU) || (frame_size > DPAA2_MAX_RX_PKT_LEN)) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA2_MAX_RX_PKT_LEN) return -EINVAL; - if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + 
~DEV_RX_OFFLOAD_JUMBO_FRAME; dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -959,7 +1301,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) * Maximum Ethernet header length */ ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW, priv->token, - frame_size); + frame_size - RTE_ETHER_CRC_LEN); if (ret) { DPAA2_PMD_ERR("Setting the max frame length failed"); return -1; @@ -970,13 +1312,13 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) static int dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr, + struct rte_ether_addr *addr, __rte_unused uint32_t index, __rte_unused uint32_t pool) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); @@ -985,8 +1327,8 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, return -1; } - ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, - priv->token, addr->addr_bytes); + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, + addr->addr_bytes, 0, 0, 0); if (ret) DPAA2_PMD_ERR( "error: Adding the MAC ADDR failed: err = %d", ret); @@ -999,9 +1341,9 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct rte_eth_dev_data *data = dev->data; - struct ether_addr *macaddr; + struct rte_ether_addr *macaddr; PMD_INIT_FUNC_TRACE(); @@ -1019,19 +1361,19 @@ dpaa2_dev_remove_mac_addr(struct rte_eth_dev *dev, "error: Removing the MAC ADDR failed: err = %d", ret); } -static void +static int dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr) + struct rte_ether_addr *addr) { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -EINVAL; } ret = dpni_set_primary_mac_addr(dpni, CMD_PRI_LOW, @@ -1040,6 +1382,8 @@ dpaa2_dev_set_mac_addr(struct rte_eth_dev *dev, if (ret) DPAA2_PMD_ERR( "error: Setting the MAC ADDR failed %d", ret); + + return ret; } static @@ -1047,10 +1391,12 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int32_t retcode; uint8_t page0 = 0, page1 = 1, page2 = 2; union dpni_statistics value; + int i; + struct dpaa2_queue *dpaa2_rxq, *dpaa2_txq; memset(&value, 0, sizeof(union dpni_statistics)); @@ -1098,6 +1444,21 @@ int dpaa2_dev_stats_get(struct rte_eth_dev *dev, stats->oerrors = value.page_2.egress_discarded_frames; stats->imissed = value.page_2.ingress_nobuffer_discards; + /* Fill in per queue stats */ + for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) && + (i < priv->nb_rx_queues || i < priv->nb_tx_queues); ++i) { + dpaa2_rxq = (struct dpaa2_queue *)priv->rx_vq[i]; + dpaa2_txq = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_rxq) + stats->q_ipackets[i] = dpaa2_rxq->rx_pkts; + if (dpaa2_txq) + stats->q_opackets[i] = dpaa2_txq->tx_pkts; + + /* Byte counting is not implemented */ + stats->q_ibytes[i] = 0; + stats->q_obytes[i] = 0; + } + return 0; err: @@ -1110,9 +1471,9 @@ dpaa2_dev_xstats_get(struct 
rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int n) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; int32_t retcode; - union dpni_statistics value[3] = {}; + union dpni_statistics value[5] = {}; unsigned int i = 0, num = RTE_DIM(dpaa2_xstats_strings); if (n < num) @@ -1139,6 +1500,18 @@ dpaa2_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, if (retcode) goto err; + for (i = 0; i < priv->max_cgs; i++) { + if (!priv->cgid_in_use[i]) { + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, + priv->token, + 4, 0, &value[4]); + if (retcode) + goto err; + break; + } + } + for (i = 0; i < num; i++) { xstats[i].id = i; xstats[i].value = value[dpaa2_xstats_strings[i].page_id]. @@ -1162,10 +1535,9 @@ dpaa2_xstats_get_names(__rte_unused struct rte_eth_dev *dev, if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) - snprintf(xstats_names[i].name, - sizeof(xstats_names[i].name), - "%s", - dpaa2_xstats_strings[i].name); + strlcpy(xstats_names[i].name, + dpaa2_xstats_strings[i].name, + sizeof(xstats_names[i].name)); return stat_cnt; } @@ -1179,9 +1551,10 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, if (!ids) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = + (struct fsl_mc_io *)dev->process_private; int32_t retcode; - union dpni_statistics value[3] = {}; + union dpni_statistics value[5] = {}; if (n < stat_cnt) return stat_cnt; @@ -1207,6 +1580,12 @@ dpaa2_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids, if (retcode) return 0; + /* Get Counters from page_4*/ + retcode = dpni_get_statistics(dpni, CMD_PRI_LOW, priv->token, + 4, 0, &value[4]); + if (retcode) + return 0; + for (i = 0; i < stat_cnt; i++) { values[i] = value[dpaa2_xstats_strings[i].page_id]. 
raw.counter[dpaa2_xstats_strings[i].stats_id]; @@ -1251,29 +1630,44 @@ dpaa2_xstats_get_names_by_id( return limit; } -static void +static int dpaa2_dev_stats_reset(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; - int32_t retcode; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; + int retcode; + int i; + struct dpaa2_queue *dpaa2_q; PMD_INIT_FUNC_TRACE(); if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); - return; + return -EINVAL; } retcode = dpni_reset_statistics(dpni, CMD_PRI_LOW, priv->token); if (retcode) goto error; - return; + /* Reset the per queue stats in dpaa2_queue structure */ + for (i = 0; i < priv->nb_rx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; + if (dpaa2_q) + dpaa2_q->rx_pkts = 0; + } + + for (i = 0; i < priv->nb_tx_queues; i++) { + dpaa2_q = (struct dpaa2_queue *)priv->tx_vq[i]; + if (dpaa2_q) + dpaa2_q->tx_pkts = 0; + } + + return 0; error: DPAA2_PMD_ERR("Operation not completed:Error Code = %d", retcode); - return; + return retcode; }; /* return 0 means link status changed, -1 means not changed */ @@ -1283,7 +1677,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, { int ret; struct dpaa2_dev_priv *priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct rte_eth_link link; struct dpni_link_state state = {0}; @@ -1294,7 +1688,7 @@ dpaa2_dev_link_update(struct rte_eth_dev *dev, ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - DPAA2_PMD_ERR("error: dpni_get_link_state %d", ret); + DPAA2_PMD_DEBUG("error: dpni_get_link_state %d", ret); return -1; } @@ -1331,7 +1725,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) struct dpni_link_state state = {0}; priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); @@ -1356,7 +1750,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) } ret = dpni_get_link_state(dpni, CMD_PRI_LOW, priv->token, &state); if (ret < 0) { - DPAA2_PMD_ERR("Unable to get link state (%d)", ret); + DPAA2_PMD_DEBUG("Unable to get link state (%d)", ret); return -1; } @@ -1387,7 +1781,7 @@ dpaa2_dev_set_link_down(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("Device has not yet been configured"); @@ -1441,7 +1835,7 @@ dpaa2_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL || fc_conf == NULL) { DPAA2_PMD_ERR("device not configured"); @@ -1497,7 +1891,7 @@ dpaa2_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) PMD_INIT_FUNC_TRACE(); priv = dev->data->dev_private; - dpni = (struct fsl_mc_io *)priv->hw; + dpni = (struct fsl_mc_io *)dev->process_private; if (dpni == NULL) { DPAA2_PMD_ERR("dpni is NULL"); @@ -1613,35 +2007,75 @@ dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, int eth_rx_queue_id, - uint16_t dpcon_id, + struct dpaa2_dpcon_dev *dpcon, const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) { struct dpaa2_dev_priv *eth_priv = 
dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; uint8_t flow_id = dpaa2_ethq->flow_id; struct dpni_queue cfg; - uint8_t options; + uint8_t options, priority; int ret; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) dpaa2_ethq->cb = dpaa2_dev_process_parallel_event; else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) dpaa2_ethq->cb = dpaa2_dev_process_atomic_event; + else if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED) + dpaa2_ethq->cb = dpaa2_dev_process_ordered_event; else return -EINVAL; + priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * + (dpcon->num_priorities - 1); + memset(&cfg, 0, sizeof(struct dpni_queue)); options = DPNI_QUEUE_OPT_DEST; cfg.destination.type = DPNI_DEST_DPCON; - cfg.destination.id = dpcon_id; - cfg.destination.priority = queue_conf->ev.priority; + cfg.destination.id = dpcon->dpcon_id; + cfg.destination.priority = priority; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; cfg.destination.hold_active = 1; } + if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ORDERED && + !eth_priv->en_ordered) { + struct opr_cfg ocfg; + + /* Restoration window size = 256 frames */ + ocfg.oprrws = 3; + /* Restoration window size = 512 frames for LX2 */ + if (dpaa2_svr_family == SVR_LX2160A) + ocfg.oprrws = 4; + /* Auto advance NESN window enabled */ + ocfg.oa = 1; + /* Late arrival window size disabled */ + ocfg.olws = 0; + /* ORL resource exhaustaion advance NESN disabled */ + ocfg.oeane = 0; + /* Loose ordering enabled */ + ocfg.oloe = 1; + eth_priv->en_loose_ordered = 1; + /* Strict ordering enabled if explicitly set */ + if (getenv("DPAA2_STRICT_ORDERING_ENABLE")) { + ocfg.oloe = 0; + eth_priv->en_loose_ordered = 0; + } + + ret = dpni_set_opr(dpni, CMD_PRI_LOW, eth_priv->token, + dpaa2_ethq->tc_index, flow_id, + OPR_OPT_CREATE, &ocfg); + if (ret) { + DPAA2_PMD_ERR("Error setting opr: ret: %d\n", ret); + return ret; + } + + eth_priv->en_ordered = 1; + } + options |= DPNI_QUEUE_OPT_USER_CTX; cfg.user_context = (size_t)(dpaa2_ethq); @@ -1661,7 +2095,7 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; uint8_t flow_id = dpaa2_ethq->flow_id; struct dpni_queue cfg; @@ -1680,6 +2114,47 @@ int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev, return ret; } +static inline int +dpaa2_dev_verify_filter_ops(enum rte_filter_op filter_op) +{ + unsigned int i; + + for (i = 0; i < RTE_DIM(dpaa2_supported_filter_ops); i++) { + if (dpaa2_supported_filter_ops[i] == filter_op) + return 0; + } + return -ENOTSUP; +} + +static int +dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev, + enum rte_filter_type filter_type, + enum rte_filter_op filter_op, + void *arg) +{ + int ret = 0; + + if (!dev) + return -ENODEV; + + switch (filter_type) { + case RTE_ETH_FILTER_GENERIC: + if (dpaa2_dev_verify_filter_ops(filter_op) < 0) { + ret = -ENOTSUP; + break; + } + *(const void **)arg = &dpaa2_flow_ops; + dpaa2_filter_type |= filter_type; + break; + default: + RTE_LOG(ERR, PMD, "Filter type (%d) not supported", + filter_type); + ret = -ENOTSUP; + break; + } + return ret; 
+} + static struct eth_dev_ops dpaa2_ethdev_ops = { .dev_configure = dpaa2_eth_dev_configure, .dev_start = dpaa2_dev_start, @@ -1705,6 +2180,7 @@ static struct eth_dev_ops dpaa2_ethdev_ops = { .mtu_set = dpaa2_dev_mtu_set, .vlan_filter_set = dpaa2_vlan_filter_set, .vlan_offload_set = dpaa2_vlan_offload_set, + .vlan_tpid_set = dpaa2_vlan_tpid_set, .rx_queue_setup = dpaa2_dev_rx_queue_setup, .rx_queue_release = dpaa2_dev_rx_queue_release, .tx_queue_setup = dpaa2_dev_tx_queue_setup, @@ -1717,8 +2193,124 @@ static struct eth_dev_ops dpaa2_ethdev_ops = { .mac_addr_set = dpaa2_dev_set_mac_addr, .rss_hash_update = dpaa2_dev_rss_hash_update, .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, + .filter_ctrl = dpaa2_dev_flow_ctrl, +#if defined(RTE_LIBRTE_IEEE1588) + .timesync_enable = dpaa2_timesync_enable, + .timesync_disable = dpaa2_timesync_disable, + .timesync_read_time = dpaa2_timesync_read_time, + .timesync_write_time = dpaa2_timesync_write_time, + .timesync_adjust_time = dpaa2_timesync_adjust_time, + .timesync_read_rx_timestamp = dpaa2_timesync_read_rx_timestamp, + .timesync_read_tx_timestamp = dpaa2_timesync_read_tx_timestamp, +#endif }; +/* Populate the mac address from physically available (u-boot/firmware) and/or + * one set by higher layers like MC (restool) etc. + * Returns the table of MAC entries (multiple entries) + */ +static int +populate_mac_addr(struct fsl_mc_io *dpni_dev, struct dpaa2_dev_priv *priv, + struct rte_ether_addr *mac_entry) +{ + int ret; + struct rte_ether_addr phy_mac, prime_mac; + + memset(&phy_mac, 0, sizeof(struct rte_ether_addr)); + memset(&prime_mac, 0, sizeof(struct rte_ether_addr)); + + /* Get the physical device MAC address */ + ret = dpni_get_port_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get physical port MAC failed: %d", ret); + goto cleanup; + } + + ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("DPNI get Prime port MAC failed: %d", ret); + goto cleanup; + } + + /* Now that both MAC have been obtained, do: + * if not_empty_mac(phy) && phy != Prime, overwrite prime with Phy + * and return phy + * If empty_mac(phy), return prime. 
+ * if both are empty, create random MAC, set as prime and return + */ + if (!rte_is_zero_ether_addr(&phy_mac)) { + /* If the addresses are not same, overwrite prime */ + if (!rte_is_same_ether_addr(&phy_mac, &prime_mac)) { + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + phy_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", + ret); + goto cleanup; + } + memcpy(&prime_mac, &phy_mac, + sizeof(struct rte_ether_addr)); + } + } else if (rte_is_zero_ether_addr(&prime_mac)) { + /* In case phys and prime, both are zero, create random MAC */ + rte_eth_random_addr(prime_mac.addr_bytes); + ret = dpni_set_primary_mac_addr(dpni_dev, CMD_PRI_LOW, + priv->token, + prime_mac.addr_bytes); + if (ret) { + DPAA2_PMD_ERR("Unable to set MAC Address: %d", ret); + goto cleanup; + } + } + + /* prime_mac the final MAC address */ + memcpy(mac_entry, &prime_mac, sizeof(struct rte_ether_addr)); + return 0; + +cleanup: + return -1; +} + +static int +check_devargs_handler(__rte_unused const char *key, const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +dpaa2_get_devargs(struct rte_devargs *devargs, const char *key) +{ + struct rte_kvargs *kvlist; + + if (!devargs) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (!kvlist) + return 0; + + if (!rte_kvargs_count(kvlist, key)) { + rte_kvargs_free(kvlist); + return 0; + } + + if (rte_kvargs_process(kvlist, key, + check_devargs_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + static int dpaa2_dev_init(struct rte_eth_dev *eth_dev) { @@ -1728,25 +2320,38 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) struct dpni_attr attr; struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; struct dpni_buffer_layout layout; - int ret, hw_id; + int ret, hw_id, i; PMD_INIT_FUNC_TRACE(); - /* For secondary processes, the primary has done all the work */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); - - hw_id = dpaa2_dev->object_id; - dpni_dev = rte_malloc(NULL, sizeof(struct fsl_mc_io), 0); if (!dpni_dev) { DPAA2_PMD_ERR("Memory allocation failed for dpni device"); return -1; } - dpni_dev->regs = rte_mcp_ptr_list[0]; + eth_dev->process_private = (void *)dpni_dev; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* In case of secondary, only burst and ops API need to be + * plugged. + */ + eth_dev->dev_ops = &dpaa2_ethdev_ops; + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + else if (dpaa2_get_devargs(dev->devargs, + DRIVER_NO_PREFETCH_MODE)) + eth_dev->rx_pkt_burst = dpaa2_dev_rx; + else + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + eth_dev->tx_pkt_burst = dpaa2_dev_tx; + return 0; + } + + dpaa2_dev = container_of(dev, struct rte_dpaa2_device, device); + + hw_id = dpaa2_dev->object_id; ret = dpni_open(dpni_dev, CMD_PRI_LOW, hw_id, &priv->token); if (ret) { DPAA2_PMD_ERR( @@ -1773,19 +2378,24 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) } priv->num_rx_tc = attr.num_rx_tcs; + /* only if the custom CG is enabled */ + if (attr.options & DPNI_OPT_CUSTOM_CG) + priv->max_cgs = attr.num_cgs; + else + priv->max_cgs = 0; - /* Resetting the "num_rx_queues" to equal number of queues in first TC - * as only one TC is supported on Rx Side. 
Once Multiple TCs will be - * in use for Rx processing then this will be changed or removed. - */ - priv->nb_rx_queues = attr.num_queues; + for (i = 0; i < priv->max_cgs; i++) + priv->cgid_in_use[i] = 0; + + for (i = 0; i < attr.num_rx_tcs; i++) + priv->nb_rx_queues += attr.num_queues; /* Using number of TX queues as number of TX TCs */ priv->nb_tx_queues = attr.num_tx_tcs; - DPAA2_PMD_DEBUG("RX-TC= %d, nb_rx_queues= %d, nb_tx_queues=%d", + DPAA2_PMD_DEBUG("RX-TC= %d, rx_queues= %d, tx_queues=%d, max_cgs=%d", priv->num_rx_tc, priv->nb_rx_queues, - priv->nb_tx_queues); + priv->nb_tx_queues, priv->max_cgs); priv->hw = dpni_dev; priv->hw_id = hw_id; @@ -1793,6 +2403,11 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) priv->max_mac_filters = attr.mac_filter_entries; priv->max_vlan_filters = attr.vlan_filter_entries; priv->flags = 0; +#if defined(RTE_LIBRTE_IEEE1588) + priv->tx_conf_en = 1; +#else + priv->tx_conf_en = 0; +#endif /* Allocate memory for hardware structure for queues */ ret = dpaa2_alloc_rx_tx_queues(eth_dev); @@ -1801,29 +2416,37 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) goto init_err; } - /* Allocate memory for storing MAC addresses */ + /* Allocate memory for storing MAC addresses. + * Table of mac_filter_entries size is allocated so that RTE ether lib + * can add MAC entries when rte_eth_dev_mac_addr_add is called. + */ eth_dev->data->mac_addrs = rte_zmalloc("dpni", - ETHER_ADDR_LEN * attr.mac_filter_entries, 0); + RTE_ETHER_ADDR_LEN * attr.mac_filter_entries, 0); if (eth_dev->data->mac_addrs == NULL) { DPAA2_PMD_ERR( "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * attr.mac_filter_entries); + RTE_ETHER_ADDR_LEN * attr.mac_filter_entries); ret = -ENOMEM; goto init_err; } - ret = dpni_get_primary_mac_addr(dpni_dev, CMD_PRI_LOW, - priv->token, - (uint8_t *)(eth_dev->data->mac_addrs[0].addr_bytes)); + ret = populate_mac_addr(dpni_dev, priv, ð_dev->data->mac_addrs[0]); if (ret) { - DPAA2_PMD_ERR("DPNI get mac address failed:Err Code = %d", - ret); + DPAA2_PMD_ERR("Unable to fetch MAC Address for device"); + rte_free(eth_dev->data->mac_addrs); + eth_dev->data->mac_addrs = NULL; goto init_err; } /* ... tx buffer layout ... */ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); - layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + if (priv->tx_conf_en) { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + layout.pass_timestamp = true; + } else { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + } layout.pass_frame_status = 1; ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, &layout); @@ -1834,7 +2457,13 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) /* ... tx-conf and error buffer layout ... 
*/ memset(&layout, 0, sizeof(struct dpni_buffer_layout)); - layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + if (priv->tx_conf_en) { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | + DPNI_BUF_LAYOUT_OPT_TIMESTAMP; + layout.pass_timestamp = true; + } else { + layout.options = DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; + } layout.pass_frame_status = 1; ret = dpni_set_buffer_layout(dpni_dev, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX_CONFIRM, &layout); @@ -1846,10 +2475,69 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->dev_ops = &dpaa2_ethdev_ops; - eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) { + eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; + DPAA2_PMD_INFO("Loopback mode"); + } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) { + eth_dev->rx_pkt_burst = dpaa2_dev_rx; + DPAA2_PMD_INFO("No Prefetch mode"); + } else { + eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx; + } eth_dev->tx_pkt_burst = dpaa2_dev_tx; - DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name); + /*Init fields w.r.t. classficaition*/ + memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg)); + priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.qos_extract_param) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " + " classificaiton ", ret); + goto init_err; + } + for (i = 0; i < MAX_TCS; i++) { + memset(&priv->extract.fs_key_cfg[i], 0, + sizeof(struct dpkg_profile_cfg)); + priv->extract.fs_extract_param[i] = + (size_t)rte_malloc(NULL, 256, 64); + if (!priv->extract.fs_extract_param[i]) { + DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton", + ret); + goto init_err; + } + } + + ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, + RTE_ETHER_MAX_LEN - RTE_ETHER_CRC_LEN + + VLAN_TAG_SIZE); + if (ret) { + DPAA2_PMD_ERR("Unable to set mtu. check config"); + goto init_err; + } + + /*TODO To enable soft parser support DPAA2 driver needs to integrate + * with external entity to receive byte code for software sequence + * and same will be offload to the H/W using MC interface. + * Currently it is assumed that DPAA2 driver has byte code by some + * mean and same if offloaded to H/W. 
+ */ + if (getenv("DPAA2_ENABLE_SOFT_PARSER")) { + WRIOP_SS_INITIALIZER(priv); + ret = dpaa2_eth_load_wriop_soft_parser(priv, DPNI_SS_INGRESS); + if (ret < 0) { + DPAA2_PMD_ERR(" Error(%d) in loading softparser\n", + ret); + return ret; + } + + ret = dpaa2_eth_enable_wriop_soft_parser(priv, + DPNI_SS_INGRESS); + if (ret < 0) { + DPAA2_PMD_ERR(" Error(%d) in enabling softparser\n", + ret); + return ret; + } + } + RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); return 0; init_err: dpaa2_dev_uninit(eth_dev); @@ -1860,9 +2548,8 @@ static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) { struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)priv->hw; + struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private; int i, ret; - struct dpaa2_queue *dpaa2_q; PMD_INIT_FUNC_TRACE(); @@ -1876,23 +2563,7 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) dpaa2_dev_close(eth_dev); - if (priv->rx_vq[0]) { - /* cleaning up queue storage */ - for (i = 0; i < priv->nb_rx_queues; i++) { - dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[i]; - if (dpaa2_q->q_storage) - rte_free(dpaa2_q->q_storage); - } - /*free the all queue memory */ - rte_free(priv->rx_vq[0]); - priv->rx_vq[0] = NULL; - } - - /* free memory for storing MAC addresses */ - if (eth_dev->data->mac_addrs) { - rte_free(eth_dev->data->mac_addrs); - eth_dev->data->mac_addrs = NULL; - } + dpaa2_free_rx_tx_queues(eth_dev); /* Close the device at underlying layer*/ ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); @@ -1904,8 +2575,17 @@ dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) /* Free the allocated memory for ethernet private data and dpni*/ priv->hw = NULL; + eth_dev->process_private = NULL; rte_free(dpni); + for (i = 0; i < MAX_TCS; i++) { + if (priv->extract.fs_extract_param[i]) + rte_free((void *)(size_t)priv->extract.fs_extract_param[i]); + } + + if (priv->extract.qos_extract_param) + rte_free((void *)(size_t)priv->extract.qos_extract_param); + eth_dev->dev_ops = NULL; eth_dev->rx_pkt_burst = NULL; eth_dev->tx_pkt_burst = NULL; @@ -1919,30 +2599,45 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv, struct rte_dpaa2_device *dpaa2_dev) { struct rte_eth_dev *eth_dev; + struct dpaa2_dev_priv *dev_priv; int diag; + if ((DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA2_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA2 Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA2_MBUF_HW_ANNOTATION + DPAA2_FD_PTA_SIZE); + + return -1; + } + if (rte_eal_process_type() == RTE_PROC_PRIMARY) { eth_dev = rte_eth_dev_allocate(dpaa2_dev->device.name); if (!eth_dev) return -ENODEV; - eth_dev->data->dev_private = rte_zmalloc( - "ethdev private structure", - sizeof(struct dpaa2_dev_priv), - RTE_CACHE_LINE_SIZE); - if (eth_dev->data->dev_private == NULL) { + dev_priv = rte_zmalloc("ethdev private structure", + sizeof(struct dpaa2_dev_priv), + RTE_CACHE_LINE_SIZE); + if (dev_priv == NULL) { DPAA2_PMD_CRIT( "Unable to allocate memory for private data"); rte_eth_dev_release_port(eth_dev); return -ENOMEM; } + eth_dev->data->dev_private = (void *)dev_priv; + /* Store a pointer to eth_dev in dev_private */ + dev_priv->eth_dev = eth_dev; + dev_priv->tx_conf_en = 0; } else { eth_dev = rte_eth_dev_attach_secondary(dpaa2_dev->device.name); - if (!eth_dev) + if (!eth_dev) { + DPAA2_PMD_DEBUG("returning enodev"); return -ENODEV; + } } eth_dev->device = &dpaa2_dev->device; - eth_dev->device->driver = &dpaa2_drv->driver; dpaa2_dev->eth_dev = eth_dev; 
 	eth_dev->data->rx_mbuf_alloc_failed = 0;
@@ -1952,11 +2647,11 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,

 	/* Invoke PMD device initialization function */
 	diag = dpaa2_dev_init(eth_dev);
-	if (diag == 0)
+	if (diag == 0) {
+		rte_eth_dev_probing_finish(eth_dev);
 		return 0;
+	}

-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_free(eth_dev->data->dev_private);
 	rte_eth_dev_release_port(eth_dev);
 	return diag;
 }
@@ -1969,8 +2664,6 @@ rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev)
 	eth_dev = dpaa2_dev->eth_dev;
 	dpaa2_dev_uninit(eth_dev);

-	if (rte_eal_process_type() == RTE_PROC_PRIMARY)
-		rte_free(eth_dev->data->dev_private);
 	rte_eth_dev_release_port(eth_dev);

 	return 0;
@@ -1984,10 +2677,10 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
 };

 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
-
-RTE_INIT(dpaa2_pmd_init_log);
-static void
-dpaa2_pmd_init_log(void)
+RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
+		DRIVER_LOOPBACK_MODE "= "
+		DRIVER_NO_PREFETCH_MODE "=");
+RTE_INIT(dpaa2_pmd_init_log)
 {
 	dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
 	if (dpaa2_logtype_pmd >= 0)