X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa%2Fdpaa_ethdev.c;h=13d1c6a1fc515808cf77919d95bb6947f72ccc48;hb=52799debdf1c3d9c25a5585c591e8b8959495c23;hp=9b69ef456e5dddda6ccb2550b22f3ccb32b04c83;hpb=b9083ea5e084f0cfcfca40f4452b1a92c1b1ea18;p=dpdk.git diff --git a/drivers/net/dpaa/dpaa_ethdev.c b/drivers/net/dpaa/dpaa_ethdev.c index 9b69ef456e..13d1c6a1fc 100644 --- a/drivers/net/dpaa/dpaa_ethdev.c +++ b/drivers/net/dpaa/dpaa_ethdev.c @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause * * Copyright 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2017 NXP + * Copyright 2017-2019 NXP * */ /* System headers */ @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -45,14 +46,45 @@ #include #include +int dpaa_logtype_pmd; + +/* Supported Rx offloads */ +static uint64_t dev_rx_offloads_sup = + DEV_RX_OFFLOAD_JUMBO_FRAME | + DEV_RX_OFFLOAD_SCATTER; + +/* Rx offloads which cannot be disabled */ +static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM | + DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_RX_OFFLOAD_RSS_HASH; + +/* Supported Tx offloads */ +static uint64_t dev_tx_offloads_sup = + DEV_TX_OFFLOAD_MT_LOCKFREE | + DEV_TX_OFFLOAD_MBUF_FAST_FREE; + +/* Tx offloads which cannot be disabled */ +static uint64_t dev_tx_offloads_nodis = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_SCTP_CKSUM | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | + DEV_TX_OFFLOAD_MULTI_SEGS; + /* Keep track of whether QMAN and BMAN have been globally initialized */ static int is_global_init; -/* At present we only allow up to 4 push mode queues - as each of this queue - * need dedicated portal and we are short of portals. +static int default_q; /* use default queue - FMC is not executed*/ +/* At present we only allow up to 4 push mode queues as default - as each of + * this queue need dedicated portal and we are short of portals. */ -#define DPAA_MAX_PUSH_MODE_QUEUE 4 +#define DPAA_MAX_PUSH_MODE_QUEUE 8 +#define DPAA_DEFAULT_PUSH_MODE_QUEUE 4 -static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; +static int dpaa_push_mode_max_queue = DPAA_DEFAULT_PUSH_MODE_QUEUE; static int dpaa_push_queue_idx; /* Queue index which are in push mode*/ @@ -95,6 +127,9 @@ static const struct rte_dpaa_xstats_name_off dpaa_xstats_strings[] = { static struct rte_dpaa_driver rte_dpaa_pmd; +static int +dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info); + static inline void dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts) { @@ -114,17 +149,38 @@ static int dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) { struct dpaa_if *dpaa_intf = dev->data->dev_private; - uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + uint32_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + VLAN_TAG_SIZE; + uint32_t buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; PMD_INIT_FUNC_TRACE(); - if (mtu < ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) + if (mtu < RTE_ETHER_MIN_MTU || frame_size > DPAA_MAX_RX_PKT_LEN) + return -EINVAL; + /* + * Refuse mtu that requires the support of scattered packets + * when this feature has not been enabled before. 
+ */ + if (dev->data->min_rx_buf_size && + !dev->data->scattered_rx && frame_size > buffsz) { + DPAA_PMD_ERR("SG not enabled, will not fit in one buffer"); + return -EINVAL; + } + + /* check * >= max_frame */ + if (dev->data->min_rx_buf_size && dev->data->scattered_rx && + (frame_size > buffsz * DPAA_SGT_MAX_ENTRIES)) { + DPAA_PMD_ERR("Too big to fit for Max SG list %d", + buffsz * DPAA_SGT_MAX_ENTRIES); return -EINVAL; - if (frame_size > ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.jumbo_frame = 1; + } + + if (frame_size > RTE_ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.offloads |= + DEV_RX_OFFLOAD_JUMBO_FRAME; else - dev->data->dev_conf.rxmode.jumbo_frame = 0; + dev->data->dev_conf.rxmode.offloads &= + ~DEV_RX_OFFLOAD_JUMBO_FRAME; dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; @@ -134,22 +190,58 @@ dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) } static int -dpaa_eth_dev_configure(struct rte_eth_dev *dev __rte_unused) +dpaa_eth_dev_configure(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + uint64_t rx_offloads = eth_conf->rxmode.offloads; + uint64_t tx_offloads = eth_conf->txmode.offloads; PMD_INIT_FUNC_TRACE(); - if (dev->data->dev_conf.rxmode.jumbo_frame == 1) { + /* Rx offloads which are enabled by default */ + if (dev_rx_offloads_nodis & ~rx_offloads) { + DPAA_PMD_INFO( + "Some of rx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + rx_offloads, dev_rx_offloads_nodis); + } + + /* Tx offloads which are enabled by default */ + if (dev_tx_offloads_nodis & ~tx_offloads) { + DPAA_PMD_INFO( + "Some of tx offloads enabled by default - requested 0x%" PRIx64 + " fixed are 0x%" PRIx64, + tx_offloads, dev_tx_offloads_nodis); + } + + if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { + uint32_t max_len; + + DPAA_PMD_DEBUG("enabling jumbo"); + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= - DPAA_MAX_RX_PKT_LEN) { - fman_if_set_maxfrm(dpaa_intf->fif, - dev->data->dev_conf.rxmode.max_rx_pkt_len); - return 0; - } else { - return -1; + DPAA_MAX_RX_PKT_LEN) + max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len; + else { + DPAA_PMD_INFO("enabling jumbo override conf max len=%d " + "supported is %d", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + DPAA_MAX_RX_PKT_LEN); + max_len = DPAA_MAX_RX_PKT_LEN; } + + fman_if_set_maxfrm(dpaa_intf->fif, max_len); + dev->data->mtu = max_len + - RTE_ETHER_HDR_LEN - RTE_ETHER_CRC_LEN - VLAN_TAG_SIZE; } + + if (rx_offloads & DEV_RX_OFFLOAD_SCATTER) { + DPAA_PMD_DEBUG("enabling scatter mode"); + fman_if_set_sg(dpaa_intf->fif, 1); + dev->data->scattered_rx = 1; + } + return 0; } @@ -157,12 +249,15 @@ static const uint32_t * dpaa_supported_ptypes_get(struct rte_eth_dev *dev) { static const uint32_t ptypes[] = { - /*todo -= add more types */ RTE_PTYPE_L2_ETHER, - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV4_EXT, - RTE_PTYPE_L3_IPV6, - RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L2_ETHER_VLAN, + RTE_PTYPE_L2_ETHER_ARP, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_ICMP, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, RTE_PTYPE_L4_TCP, RTE_PTYPE_L4_UDP, RTE_PTYPE_L4_SCTP @@ -238,32 +333,44 @@ dpaa_fw_version_get(struct rte_eth_dev *dev __rte_unused, return 0; } -static void dpaa_eth_dev_info(struct rte_eth_dev *dev, - struct rte_eth_dev_info *dev_info) +static int dpaa_eth_dev_info(struct rte_eth_dev *dev, + struct rte_eth_dev_info *dev_info) { struct dpaa_if *dpaa_intf = dev->data->dev_private; - 
PMD_INIT_FUNC_TRACE(); + DPAA_PMD_DEBUG(": %s", dpaa_intf->name); dev_info->max_rx_queues = dpaa_intf->nb_rx_queues; dev_info->max_tx_queues = dpaa_intf->nb_tx_queues; - dev_info->min_rx_bufsize = DPAA_MIN_RX_BUF_SIZE; dev_info->max_rx_pktlen = DPAA_MAX_RX_PKT_LEN; dev_info->max_mac_addrs = DPAA_MAX_MAC_FILTER; dev_info->max_hash_mac_addrs = 0; dev_info->max_vfs = 0; dev_info->max_vmdq_pools = ETH_16_POOLS; dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL; - dev_info->speed_capa = (ETH_LINK_SPEED_1G | - ETH_LINK_SPEED_10G); - dev_info->rx_offload_capa = - (DEV_RX_OFFLOAD_IPV4_CKSUM | - DEV_RX_OFFLOAD_UDP_CKSUM | - DEV_RX_OFFLOAD_TCP_CKSUM); - dev_info->tx_offload_capa = - (DEV_TX_OFFLOAD_IPV4_CKSUM | - DEV_TX_OFFLOAD_UDP_CKSUM | - DEV_TX_OFFLOAD_TCP_CKSUM); + + if (dpaa_intf->fif->mac_type == fman_mac_1g) { + dev_info->speed_capa = ETH_LINK_SPEED_1G; + } else if (dpaa_intf->fif->mac_type == fman_mac_10g) { + dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G); + } else { + DPAA_PMD_ERR("invalid link_speed: %s, %d", + dpaa_intf->name, dpaa_intf->fif->mac_type); + return -EINVAL; + } + + dev_info->rx_offload_capa = dev_rx_offloads_sup | + dev_rx_offloads_nodis; + dev_info->tx_offload_capa = dev_tx_offloads_sup | + dev_tx_offloads_nodis; + dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE; + dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE; + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH; + dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH; + + return 0; } static int dpaa_eth_link_update(struct rte_eth_dev *dev, @@ -275,9 +382,9 @@ static int dpaa_eth_link_update(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); if (dpaa_intf->fif->mac_type == fman_mac_1g) - link->link_speed = 1000; + link->link_speed = ETH_SPEED_NUM_1G; else if (dpaa_intf->fif->mac_type == fman_mac_10g) - link->link_speed = 10000; + link->link_speed = ETH_SPEED_NUM_10G; else DPAA_PMD_ERR("invalid link_speed: %s, %d", dpaa_intf->name, dpaa_intf->fif->mac_type); @@ -299,13 +406,15 @@ static int dpaa_eth_stats_get(struct rte_eth_dev *dev, return 0; } -static void dpaa_eth_stats_reset(struct rte_eth_dev *dev) +static int dpaa_eth_stats_reset(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); fman_if_stats_reset(dpaa_intf->fif); + + return 0; } static int @@ -316,12 +425,12 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned int i = 0, num = RTE_DIM(dpaa_xstats_strings); uint64_t values[sizeof(struct dpaa_if_stats) / 8]; - if (xstats == NULL) - return 0; - if (n < num) return num; + if (xstats == NULL) + return 0; + fman_if_stats_get_all(dpaa_intf->fif, values, sizeof(struct dpaa_if_stats) / 8); @@ -335,16 +444,18 @@ dpaa_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, static int dpaa_xstats_get_names(__rte_unused struct rte_eth_dev *dev, struct rte_eth_xstat_name *xstats_names, - __rte_unused unsigned int limit) + unsigned int limit) { unsigned int i, stat_cnt = RTE_DIM(dpaa_xstats_strings); + if (limit < stat_cnt) + return stat_cnt; + if (xstats_names != NULL) for (i = 0; i < stat_cnt; i++) - snprintf(xstats_names[i].name, - sizeof(xstats_names[i].name), - "%s", - dpaa_xstats_strings[i].name); + strlcpy(xstats_names[i].name, + dpaa_xstats_strings[i].name, + sizeof(xstats_names[i].name)); return stat_cnt; } @@ -366,7 +477,7 @@ dpaa_xstats_get_by_id(struct 
rte_eth_dev *dev, const uint64_t *ids, return 0; fman_if_stats_get_all(dpaa_intf->fif, values_copy, - sizeof(struct dpaa_if_stats)); + sizeof(struct dpaa_if_stats) / 8); for (i = 0; i < stat_cnt; i++) values[i] = @@ -412,40 +523,48 @@ dpaa_xstats_get_names_by_id( return limit; } -static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) +static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); fman_if_promiscuous_enable(dpaa_intf->fif); + + return 0; } -static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) +static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); fman_if_promiscuous_disable(dpaa_intf->fif); + + return 0; } -static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev) +static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); fman_if_set_mcast_filter_table(dpaa_intf->fif); + + return 0; } -static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev) +static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev) { struct dpaa_if *dpaa_intf = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); fman_if_reset_mcast_filter_table(dpaa_intf->fif); + + return 0; } static @@ -460,10 +579,41 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, struct qm_mcc_initfq opts = {0}; u32 flags = 0; int ret; + u32 buffsz = rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM; PMD_INIT_FUNC_TRACE(); - DPAA_PMD_INFO("Rx queue setup for queue index: %d", queue_idx); + if (queue_idx >= dev->data->nb_rx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_rx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, rxq->fqid); + + /* Max packet can fit in single buffer */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) { + ; + } else if (dev->data->dev_conf.rxmode.offloads & + DEV_RX_OFFLOAD_SCATTER) { + if (dev->data->dev_conf.rxmode.max_rx_pkt_len > + buffsz * DPAA_SGT_MAX_ENTRIES) { + DPAA_PMD_ERR("max RxPkt size %d too big to fit " + "MaxSGlist %d", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + buffsz * DPAA_SGT_MAX_ENTRIES); + rte_errno = EOVERFLOW; + return -rte_errno; + } + } else { + DPAA_PMD_WARN("The requested maximum Rx packet size (%u) is" + " larger than a single mbuf (%u) and scattered" + " mode has not been requested", + dev->data->dev_conf.rxmode.max_rx_pkt_len, + buffsz - RTE_PKTMBUF_HEADROOM); + } if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) { struct fman_if_ic_params icp; @@ -491,12 +641,19 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, fman_if_set_bp(dpaa_intf->fif, mp->size, dpaa_intf->bp_info->bpid, bp_size); dpaa_intf->valid = 1; - DPAA_PMD_INFO("if =%s - fd_offset = %d offset = %d", - dpaa_intf->name, fd_offset, - fman_if_get_fdoff(dpaa_intf->fif)); + DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d", + dpaa_intf->name, fd_offset, + fman_if_get_fdoff(dpaa_intf->fif)); } + DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name, + fman_if_get_sg_enable(dpaa_intf->fif), + dev->data->dev_conf.rxmode.max_rx_pkt_len); /* checking if push mode only, no error check for now */ - if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) { + if (!rxq->is_static && + dpaa_push_mode_max_queue > 
dpaa_push_queue_idx) { + struct qman_portal *qp; + int q_fd; + dpaa_push_queue_idx++; opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA; opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | @@ -527,13 +684,52 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; } ret = qman_init_fq(rxq, flags, &opts); - if (ret) - DPAA_PMD_ERR("Channel/Queue association failed. fqid %d" - " ret: %d", rxq->fqid, ret); - rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; - rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; + if (ret) { + DPAA_PMD_ERR("Channel/Q association failed. fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); + return ret; + } + if (dpaa_svr_family == SVR_LS1043A_FAMILY) { + rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb_no_prefetch; + } else { + rxq->cb.dqrr_dpdk_pull_cb = dpaa_rx_cb; + rxq->cb.dqrr_prepare = dpaa_rx_cb_prepare; + } + rxq->is_static = true; + + /* Allocate qman specific portals */ + qp = fsl_qman_fq_portal_create(&q_fd); + if (!qp) { + DPAA_PMD_ERR("Unable to alloc fq portal"); + return -1; + } + rxq->qp = qp; + + /* Set up the device interrupt handler */ + if (!dev->intr_handle) { + struct rte_dpaa_device *dpaa_dev; + struct rte_device *rdev = dev->device; + + dpaa_dev = container_of(rdev, struct rte_dpaa_device, + device); + dev->intr_handle = &dpaa_dev->intr_handle; + dev->intr_handle->intr_vec = rte_zmalloc(NULL, + dpaa_push_mode_max_queue, 0); + if (!dev->intr_handle->intr_vec) { + DPAA_PMD_ERR("intr_vec alloc failed"); + return -ENOMEM; + } + dev->intr_handle->nb_efd = dpaa_push_mode_max_queue; + dev->intr_handle->max_intr = dpaa_push_mode_max_queue; + } + + dev->intr_handle->type = RTE_INTR_HANDLE_EXT; + dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1; + dev->intr_handle->efds[queue_idx] = q_fd; + rxq->q_fd = q_fd; } + rxq->bp_array = rte_dpaa_bpid_info; dev->data->rx_queues[queue_idx] = rxq; /* configure the CGR size as per the desc size */ @@ -553,7 +749,7 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, return 0; } -int __rte_experimental +int dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, int eth_rx_queue_id, u16 ch_id, @@ -566,7 +762,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, struct qm_mcc_initfq opts = {0}; if (dpaa_push_mode_max_queue) - DPAA_PMD_WARN("PUSH mode already enabled for first %d queues.\n" + DPAA_PMD_WARN("PUSH mode q and EVENTDEV are not compatible\n" + "PUSH mode already enabled for first %d queues.\n" "To disable set DPAA_PUSH_QUEUES_NUMBER to 0\n", dpaa_push_mode_max_queue); @@ -604,8 +801,8 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, ret = qman_init_fq(rxq, flags, &opts); if (ret) { - DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d", - rxq->fqid, ret); + DPAA_PMD_ERR("Ev-Channel/Q association failed. 
fqid 0x%x " + "ret:%d(%s)", rxq->fqid, ret, strerror(ret)); return ret; } @@ -616,7 +813,7 @@ dpaa_eth_eventq_attach(const struct rte_eth_dev *dev, return ret; } -int __rte_experimental +int dpaa_eth_eventq_detach(const struct rte_eth_dev *dev, int eth_rx_queue_id) { @@ -662,7 +859,15 @@ int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, PMD_INIT_FUNC_TRACE(); - DPAA_PMD_INFO("Tx queue setup for queue index: %d", queue_idx); + if (queue_idx >= dev->data->nb_tx_queues) { + rte_errno = EOVERFLOW; + DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)", + (void *)dev, queue_idx, dev->data->nb_tx_queues); + return -rte_errno; + } + + DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)", + queue_idx, dpaa_intf->tx_queues[queue_idx].fqid); dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx]; return 0; } @@ -682,8 +887,8 @@ dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) PMD_INIT_FUNC_TRACE(); if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) { - RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n", - rx_queue_id, frm_cnt); + DPAA_PMD_DEBUG("RX frame count for q(%d) is %u", + rx_queue_id, frm_cnt); } return frm_cnt; } @@ -785,7 +990,7 @@ dpaa_flow_ctrl_get(struct rte_eth_dev *dev, static int dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr, + struct rte_ether_addr *addr, uint32_t index, __rte_unused uint32_t pool) { @@ -797,8 +1002,7 @@ dpaa_dev_add_mac_addr(struct rte_eth_dev *dev, ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index); if (ret) - RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:" - " err = %d", ret); + DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret); return 0; } @@ -813,9 +1017,9 @@ dpaa_dev_remove_mac_addr(struct rte_eth_dev *dev, fman_if_clear_mac_addr(dpaa_intf->fif, index); } -static void +static int dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, - struct ether_addr *addr) + struct rte_ether_addr *addr) { int ret; struct dpaa_if *dpaa_intf = dev->data->dev_private; @@ -824,7 +1028,43 @@ dpaa_dev_set_mac_addr(struct rte_eth_dev *dev, ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0); if (ret) - RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret); + DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret); + + return ret; +} + +static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; + + if (!rxq->is_static) + return -EINVAL; + + return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI); +} + +static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev, + uint16_t queue_id) +{ + struct dpaa_if *dpaa_intf = dev->data->dev_private; + struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id]; + uint32_t temp; + ssize_t temp1; + + if (!rxq->is_static) + return -EINVAL; + + qman_fq_portal_irqsource_remove(rxq->qp, ~0); + + temp1 = read(rxq->q_fd, &temp, sizeof(temp)); + if (temp1 != sizeof(temp)) + DPAA_PMD_ERR("irq read error"); + + qman_fq_portal_thread_irq(rxq->qp); + + return 0; } static struct eth_dev_ops dpaa_devops = { @@ -864,6 +1104,9 @@ static struct eth_dev_ops dpaa_devops = { .mac_addr_set = dpaa_dev_set_mac_addr, .fw_version_get = dpaa_fw_version_get, + + .rx_queue_intr_enable = dpaa_dev_queue_intr_enable, + .rx_queue_intr_disable = dpaa_dev_queue_intr_disable, }; static bool @@ -882,7 +1125,7 @@ is_dpaa_supported(struct rte_eth_dev *dev) return is_device_supported(dev, &rte_dpaa_pmd); } 
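/*
 * [Editor's illustration, not part of the patch] The rx_queue_intr_enable/
 * rx_queue_intr_disable hooks wired into dpaa_devops above are normally
 * driven through the generic ethdev Rx-interrupt API. A minimal
 * application-side sketch follows; it assumes the queue was set up in push
 * mode (so the PMD populated dev->intr_handle and the per-queue q_fd), and
 * port_id, queue_id and the burst size of 32 are illustrative values only.
 */
#include <rte_ethdev.h>
#include <rte_interrupts.h>
#include <rte_mbuf.h>

static int
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event ev;
	struct rte_mbuf *pkts[32];
	uint16_t nb_rx, i;

	/* Register the queue's event fd (rxq->q_fd above) with the
	 * calling thread's epoll instance. */
	if (rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				      RTE_INTR_EVENT_ADD, NULL) < 0)
		return -1;

	/* Arm the interrupt; this reaches dpaa_dev_queue_intr_enable(). */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);

	/* Block until the portal fd fires (timeout of -1 waits forever). */
	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) < 0)
		return -1;

	/* Disarm (dpaa_dev_queue_intr_disable()) and go back to polling. */
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	/* A real application would process the mbufs here. */
	for (i = 0; i < nb_rx; i++)
		rte_pktmbuf_free(pkts[i]);

	return nb_rx;
}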
-int __rte_experimental +int rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on) { struct rte_eth_dev *dev; @@ -938,7 +1181,7 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, { struct qm_mcc_initfq opts = {0}; int ret; - u32 flags = 0; + u32 flags = QMAN_FQ_FLAG_NO_ENQUEUE; struct qm_mcc_initcgr cgr_opts = { .we_mask = QM_CGR_WE_CS_THRES | QM_CGR_WE_CSTD_EN | @@ -949,19 +1192,20 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, } }; - PMD_INIT_FUNC_TRACE(); - - ret = qman_reserve_fqid(fqid); - if (ret) { - DPAA_PMD_ERR("reserve rx fqid %d failed with ret: %d", - fqid, ret); - return -EINVAL; + if (fqid) { + ret = qman_reserve_fqid(fqid); + if (ret) { + DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d", + fqid, ret); + return -EINVAL; + } + } else { + flags |= QMAN_FQ_FLAG_DYNAMIC_FQID; } - - DPAA_PMD_DEBUG("creating rx fq %p, fqid %d", fq, fqid); - ret = qman_create_fq(fqid, QMAN_FQ_FLAG_NO_ENQUEUE, fq); + DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid); + ret = qman_create_fq(fqid, flags, fq); if (ret) { - DPAA_PMD_ERR("create rx fqid %d failed with ret: %d", + DPAA_PMD_ERR("create rx fqid 0x%x failed with ret: %d", fqid, ret); return ret; } @@ -977,8 +1221,8 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, &cgr_opts); if (ret) { DPAA_PMD_WARN( - "rx taildrop init fail on rx fqid %d (ret=%d)", - fqid, ret); + "rx taildrop init fail on rx fqid 0x%x(ret=%d)", + fq->fqid, ret); goto without_cgr; } opts.we_mask |= QM_INITFQ_WE_CGID; @@ -986,9 +1230,9 @@ static int dpaa_rx_queue_init(struct qman_fq *fq, struct qman_cgr *cgr_rx, opts.fqd.fq_ctrl |= QM_FQCTRL_CGE; } without_cgr: - ret = qman_init_fq(fq, flags, &opts); + ret = qman_init_fq(fq, 0, &opts); if (ret) - DPAA_PMD_ERR("init rx fqid %d failed with ret: %d", fqid, ret); + DPAA_PMD_ERR("init rx fqid 0x%x failed with ret:%d", fqid, ret); return ret; } @@ -999,8 +1243,6 @@ static int dpaa_tx_queue_init(struct qman_fq *fq, struct qm_mcc_initfq opts = {0}; int ret; - PMD_INIT_FUNC_TRACE(); - ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID | QMAN_FQ_FLAG_TO_DCPORTAL, fq); if (ret) { @@ -1016,10 +1258,10 @@ static int dpaa_tx_queue_init(struct qman_fq *fq, /* no tx-confirmation */ opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi; opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo; - DPAA_PMD_DEBUG("init tx fq %p, fqid %d", fq, fq->fqid); + DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid); ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts); if (ret) - DPAA_PMD_ERR("init tx fqid %d failed %d", fq->fqid, ret); + DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret); return ret; } @@ -1060,7 +1302,7 @@ static int dpaa_debug_queue_init(struct qman_fq *fq, uint32_t fqid) static int dpaa_dev_init(struct rte_eth_dev *eth_dev) { - int num_cores, num_rx_fqs, fqid; + int num_rx_fqs, fqid; int loop, ret = 0; int dev_id; struct rte_dpaa_device *dpaa_device; @@ -1069,17 +1311,28 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) struct fman_if *fman_intf; struct fman_if_bpool *bp, *tmp_bp; uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES]; + char eth_buf[RTE_ETHER_ADDR_FMT_SIZE]; PMD_INIT_FUNC_TRACE(); + dpaa_intf = eth_dev->data->dev_private; /* For secondary processes, the primary has done all the work */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY) + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev->dev_ops = &dpaa_devops; + /* Plugging of UCODE burst API not supported in Secondary */ + eth_dev->rx_pkt_burst = 
dpaa_eth_queue_rx; + eth_dev->tx_pkt_burst = dpaa_eth_queue_tx; +#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP + qman_set_fq_lookup_table( + dpaa_intf->rx_queues->qman_fq_lookup_table); +#endif return 0; + } dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device); dev_id = dpaa_device->id.dev_id; dpaa_intf = eth_dev->data->dev_private; - cfg = &dpaa_netcfg->port_cfg[dev_id]; + cfg = dpaa_get_eth_port_cfg(dev_id); fman_intf = cfg->fman_if; dpaa_intf->name = dpaa_device->name; @@ -1090,25 +1343,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->cfg = cfg; /* Initialize Rx FQ's */ - if (getenv("DPAA_NUM_RX_QUEUES")) - num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); - else + if (default_q) { num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; - - /* if push mode queues to be enabled. Currenly we are allowing only - * one queue per thread. - */ - if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { - dpaa_push_mode_max_queue = - atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); - if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) - dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; + } else { + if (getenv("DPAA_NUM_RX_QUEUES")) + num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES")); + else + num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES; } - /* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX + + /* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX * queues. */ - if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_PCD_FQID_MULTIPLIER) { + if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) { DPAA_PMD_ERR("Invalid number of RX queues\n"); return -EINVAL; } @@ -1141,8 +1389,11 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) } for (loop = 0; loop < num_rx_fqs; loop++) { - fqid = DPAA_PCD_FQID_START + dpaa_intf->ifid * - DPAA_PCD_FQID_MULTIPLIER + loop; + if (default_q) + fqid = cfg->rx_def; + else + fqid = DPAA_PCD_FQID_START + dpaa_intf->fif->mac_idx * + DPAA_PCD_FQID_MULTIPLIER + loop; if (dpaa_intf->cgr_rx) dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop]; @@ -1157,23 +1408,22 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) dpaa_intf->nb_rx_queues = num_rx_fqs; /* Initialise Tx FQs.free_rx Have as many Tx FQ's as number of cores */ - num_cores = rte_lcore_count(); dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) * - num_cores, MAX_CACHELINE); + MAX_DPAA_CORES, MAX_CACHELINE); if (!dpaa_intf->tx_queues) { DPAA_PMD_ERR("Failed to alloc mem for TX queues\n"); ret = -ENOMEM; goto free_rx; } - for (loop = 0; loop < num_cores; loop++) { + for (loop = 0; loop < MAX_DPAA_CORES; loop++) { ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop], fman_intf); if (ret) goto free_tx; dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf; } - dpaa_intf->nb_tx_queues = num_cores; + dpaa_intf->nb_tx_queues = MAX_DPAA_CORES; #ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER dpaa_debug_queue_init(&dpaa_intf->debug_queues[ @@ -1192,7 +1442,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) /* reset bpool list, initialize bpool dynamically */ list_for_each_entry_safe(bp, tmp_bp, &cfg->fman_if->bpool_list, node) { list_del(&bp->node); - free(bp); + rte_free(bp); } /* Populate ethdev structure */ @@ -1202,26 +1452,20 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", - ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); + RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER, 0); if (eth_dev->data->mac_addrs == NULL) { DPAA_PMD_ERR("Failed to allocate %d bytes needed to " "store MAC addresses", - ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); + RTE_ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER); ret 
= -ENOMEM; goto free_tx; } /* copy the primary mac address */ - ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); + rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]); + rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr); - RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n", - dpaa_device->name, - fman_intf->mac_addr.addr_bytes[0], - fman_intf->mac_addr.addr_bytes[1], - fman_intf->mac_addr.addr_bytes[2], - fman_intf->mac_addr.addr_bytes[3], - fman_intf->mac_addr.addr_bytes[4], - fman_intf->mac_addr.addr_bytes[5]); + DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf); /* Disable RX mode */ fman_if_discard_rx_errors(fman_intf); @@ -1232,6 +1476,9 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev) fman_if_reset_mcast_filter_table(fman_intf); /* Reset interface statistics */ fman_if_stats_reset(fman_intf); + /* Disable SG by default */ + fman_if_set_sg(fman_intf, 0); + fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE); return 0; @@ -1288,10 +1535,6 @@ dpaa_dev_uninit(struct rte_eth_dev *dev) rte_free(dpaa_intf->tx_queues); dpaa_intf->tx_queues = NULL; - /* free memory for storing MAC addresses */ - rte_free(dev->data->mac_addrs); - dev->data->mac_addrs = NULL; - dev->dev_ops = NULL; dev->rx_pkt_burst = NULL; dev->tx_pkt_burst = NULL; @@ -1300,7 +1543,7 @@ dpaa_dev_uninit(struct rte_eth_dev *dev) } static int -rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, +rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused, struct rte_dpaa_device *dpaa_dev) { int diag; @@ -1309,6 +1552,16 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, PMD_INIT_FUNC_TRACE(); + if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) > + RTE_PKTMBUF_HEADROOM) { + DPAA_PMD_ERR( + "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)", + RTE_PKTMBUF_HEADROOM, + DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE); + + return -1; + } + /* In case of secondary process, the device is already configured * and no further action is required, except portal initialization * and verifying secondary attachment to port name. @@ -1317,22 +1570,30 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); if (!eth_dev) return -ENOMEM; + eth_dev->device = &dpaa_dev->device; + eth_dev->dev_ops = &dpaa_devops; + rte_eth_dev_probing_finish(eth_dev); return 0; } - if (!is_global_init) { - /* One time load of Qman/Bman drivers */ - ret = qman_global_init(); - if (ret) { - DPAA_PMD_ERR("QMAN initialization failed: %d", - ret); - return ret; + if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) { + if (access("/tmp/fmc.bin", F_OK) == -1) { + DPAA_PMD_INFO("* FMC not configured.Enabling default mode"); + default_q = 1; } - ret = bman_global_init(); - if (ret) { - DPAA_PMD_ERR("BMAN initialization failed: %d", - ret); - return ret; + + /* disabling the default push mode for LS1043 */ + if (dpaa_svr_family == SVR_LS1043A_FAMILY) + dpaa_push_mode_max_queue = 0; + + /* if push mode queues to be enabled. Currenly we are allowing + * only one queue per thread. 
+ */ + if (getenv("DPAA_PUSH_QUEUES_NUMBER")) { + dpaa_push_mode_max_queue = + atoi(getenv("DPAA_PUSH_QUEUES_NUMBER")); + if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE) + dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE; } is_global_init = 1; @@ -1346,31 +1607,38 @@ rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv, } } - eth_dev = rte_eth_dev_allocate(dpaa_dev->name); - if (eth_dev == NULL) - return -ENOMEM; + /* In case of secondary process, the device is already configured + * and no further action is required, except portal initialization + * and verifying secondary attachment to port name. + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + eth_dev = rte_eth_dev_attach_secondary(dpaa_dev->name); + if (!eth_dev) + return -ENOMEM; + } else { + eth_dev = rte_eth_dev_allocate(dpaa_dev->name); + if (eth_dev == NULL) + return -ENOMEM; - eth_dev->data->dev_private = rte_zmalloc( - "ethdev private structure", - sizeof(struct dpaa_if), - RTE_CACHE_LINE_SIZE); - if (!eth_dev->data->dev_private) { - DPAA_PMD_ERR("Cannot allocate memzone for port data"); - rte_eth_dev_release_port(eth_dev); - return -ENOMEM; + eth_dev->data->dev_private = rte_zmalloc( + "ethdev private structure", + sizeof(struct dpaa_if), + RTE_CACHE_LINE_SIZE); + if (!eth_dev->data->dev_private) { + DPAA_PMD_ERR("Cannot allocate memzone for port data"); + rte_eth_dev_release_port(eth_dev); + return -ENOMEM; + } } - eth_dev->device = &dpaa_dev->device; - eth_dev->device->driver = &dpaa_drv->driver; dpaa_dev->eth_dev = eth_dev; /* Invoke PMD device initialization function */ diag = dpaa_dev_init(eth_dev); - if (diag == 0) + if (diag == 0) { + rte_eth_dev_probing_finish(eth_dev); return 0; - - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); + } rte_eth_dev_release_port(eth_dev); return diag; @@ -1386,9 +1654,6 @@ rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev) eth_dev = dpaa_dev->eth_dev; dpaa_dev_uninit(eth_dev); - if (rte_eal_process_type() == RTE_PROC_PRIMARY) - rte_free(eth_dev->data->dev_private); - rte_eth_dev_release_port(eth_dev); return 0; @@ -1401,3 +1666,9 @@ static struct rte_dpaa_driver rte_dpaa_pmd = { }; RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd); +RTE_INIT(dpaa_net_init_log) +{ + dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa"); + if (dpaa_logtype_pmd >= 0) + rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE); +}
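The RTE_INIT constructor above registers the dynamic log type "pmd.net.dpaa" behind dpaa_logtype_pmd. A minimal sketch of how such a logtype is typically consumed is shown below; the real DPAA_PMD_* macros live in the driver's own log header and may differ in detail, so treat the macro names and prefix string here as illustrative, not as the driver's actual definitions.

#include <rte_log.h>

extern int dpaa_logtype_pmd;	/* registered in dpaa_net_init_log() above */

#define DPAA_PMD_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, dpaa_logtype_pmd, "dpaa_net: " fmt "\n", \
		##args)

#define DPAA_PMD_DEBUG(fmt, args...)	DPAA_PMD_LOG(DEBUG, fmt, ## args)
#define DPAA_PMD_INFO(fmt, args...)	DPAA_PMD_LOG(INFO, fmt, ## args)
#define DPAA_PMD_WARN(fmt, args...)	DPAA_PMD_LOG(WARNING, fmt, ## args)
#define DPAA_PMD_ERR(fmt, args...)	DPAA_PMD_LOG(ERR, fmt, ## args)

Because the default level is set to NOTICE at registration, debug output is enabled at run time with an EAL option such as --log-level=pmd.net.dpaa:debug rather than a compile-time flag.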