X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fdpaa2%2Fdpaa2_ethdev.c;h=d8624514df67713fef15a01686b7ab6ee972d35e;hb=056ec485861d3e0c9e9ad612e2dca70f66f83af9;hp=848bb421e1a9f93c2d666cd197c30f9eceef50c8;hpb=72ec7a678e704d5b3d74d51663ce98ff8293b00f;p=dpdk.git diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c index 848bb421e1..d8624514df 100644 --- a/drivers/net/dpaa2/dpaa2_ethdev.c +++ b/drivers/net/dpaa2/dpaa2_ethdev.c @@ -1,7 +1,7 @@ /* * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved. - * Copyright 2016 NXP + * Copyright 2016-2020 NXP * */ @@ -45,6 +45,7 @@ static uint64_t dev_rx_offloads_sup = /* Rx offloads which cannot be disabled */ static uint64_t dev_rx_offloads_nodis = + DEV_RX_OFFLOAD_RSS_HASH | DEV_RX_OFFLOAD_SCATTER; /* Supported Tx offloads */ @@ -63,7 +64,7 @@ static uint64_t dev_tx_offloads_nodis = DEV_TX_OFFLOAD_MULTI_SEGS; /* enable timestamp in mbuf */ -enum pmd_dpaa2_ts dpaa2_enable_ts; +bool dpaa2_enable_ts[RTE_MAX_ETHPORTS]; struct rte_dpaa2_xstats_name_off { char name[RTE_ETH_XSTATS_NAME_SIZE]; @@ -98,21 +99,12 @@ static const enum rte_filter_op dpaa2_supported_filter_ops[] = { }; static struct rte_dpaa2_driver rte_dpaa2_pmd; -static int dpaa2_dev_uninit(struct rte_eth_dev *eth_dev); static int dpaa2_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete); static int dpaa2_dev_set_link_up(struct rte_eth_dev *dev); static int dpaa2_dev_set_link_down(struct rte_eth_dev *dev); static int dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); -int dpaa2_logtype_pmd; - -void -rte_pmd_dpaa2_set_timestamp(enum pmd_dpaa2_ts enable) -{ - dpaa2_enable_ts = enable; -} - static int dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { @@ -128,8 +120,8 @@ dpaa2_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) } if (on) - ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, - priv->token, vlan_id); + ret = dpni_add_vlan_id(dpni, CMD_PRI_LOW, priv->token, + vlan_id, 0, 0, 0); else ret = dpni_remove_vlan_id(dpni, CMD_PRI_LOW, priv->token, vlan_id); @@ -146,7 +138,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = dev->process_private; - int ret; + int ret = 0; PMD_INIT_FUNC_TRACE(); @@ -154,7 +146,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) /* VLAN Filter not avaialble */ if (!priv->max_vlan_filters) { DPAA2_PMD_INFO("VLAN filter not available"); - goto next_mask; + return -ENOTSUP; } if (dev->data->dev_conf.rxmode.offloads & @@ -167,14 +159,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask) if (ret < 0) DPAA2_PMD_INFO("Unable to set vlan filter = %d", ret); } -next_mask: - if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.offloads & - DEV_RX_OFFLOAD_VLAN_EXTEND) - DPAA2_PMD_INFO("VLAN extend offload not supported"); - } - return 0; + return ret; } static int @@ -254,8 +240,6 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) PMD_INIT_FUNC_TRACE(); - dev_info->if_index = priv->hw_id; - dev_info->max_mac_addrs = priv->max_mac_filters; dev_info->max_rx_pktlen = DPAA2_MAX_RX_PKT_LEN; dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE; @@ -274,9 +258,96 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_vmdq_pools = ETH_16_POOLS; dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL; + 
dev_info->default_rxportconf.burst_size = dpaa2_dqrr_size; + /* same is rx size for best perf */ + dev_info->default_txportconf.burst_size = dpaa2_dqrr_size; + + dev_info->default_rxportconf.nb_queues = 1; + dev_info->default_txportconf.nb_queues = 1; + dev_info->default_txportconf.ring_size = CONG_ENTER_TX_THRESHOLD; + dev_info->default_rxportconf.ring_size = DPAA2_RX_DEFAULT_NBDESC; + + if (dpaa2_svr_family == SVR_LX2160A) { + dev_info->speed_capa |= ETH_LINK_SPEED_25G | + ETH_LINK_SPEED_40G | + ETH_LINK_SPEED_50G | + ETH_LINK_SPEED_100G; + } + return 0; } +static int +dpaa2_dev_rx_burst_mode_get(struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + int ret = -EINVAL; + unsigned int i; + const struct burst_info { + uint64_t flags; + const char *output; + } rx_offload_map[] = { + {DEV_RX_OFFLOAD_CHECKSUM, " Checksum,"}, + {DEV_RX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, + {DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, + {DEV_RX_OFFLOAD_OUTER_UDP_CKSUM, " Outer UDP csum,"}, + {DEV_RX_OFFLOAD_VLAN_STRIP, " VLAN strip,"}, + {DEV_RX_OFFLOAD_VLAN_FILTER, " VLAN filter,"}, + {DEV_RX_OFFLOAD_JUMBO_FRAME, " Jumbo frame,"}, + {DEV_RX_OFFLOAD_TIMESTAMP, " Timestamp,"}, + {DEV_RX_OFFLOAD_RSS_HASH, " RSS,"}, + {DEV_RX_OFFLOAD_SCATTER, " Scattered,"} + }; + + /* Update Rx offload info */ + for (i = 0; i < RTE_DIM(rx_offload_map); i++) { + if (eth_conf->rxmode.offloads & rx_offload_map[i].flags) { + snprintf(mode->info, sizeof(mode->info), "%s", + rx_offload_map[i].output); + ret = 0; + break; + } + } + return ret; +} + +static int +dpaa2_dev_tx_burst_mode_get(struct rte_eth_dev *dev, + __rte_unused uint16_t queue_id, + struct rte_eth_burst_mode *mode) +{ + struct rte_eth_conf *eth_conf = &dev->data->dev_conf; + int ret = -EINVAL; + unsigned int i; + const struct burst_info { + uint64_t flags; + const char *output; + } tx_offload_map[] = { + {DEV_TX_OFFLOAD_VLAN_INSERT, " VLAN Insert,"}, + {DEV_TX_OFFLOAD_IPV4_CKSUM, " IPV4 csum,"}, + {DEV_TX_OFFLOAD_UDP_CKSUM, " UDP csum,"}, + {DEV_TX_OFFLOAD_TCP_CKSUM, " TCP csum,"}, + {DEV_TX_OFFLOAD_SCTP_CKSUM, " SCTP csum,"}, + {DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM, " Outer IPV4 csum,"}, + {DEV_TX_OFFLOAD_MT_LOCKFREE, " MT lockfree,"}, + {DEV_TX_OFFLOAD_MBUF_FAST_FREE, " MBUF free disable,"}, + {DEV_TX_OFFLOAD_MULTI_SEGS, " Scattered,"} + }; + + /* Update Tx offload info */ + for (i = 0; i < RTE_DIM(tx_offload_map); i++) { + if (eth_conf->txmode.offloads & tx_offload_map[i].flags) { + snprintf(mode->info, sizeof(mode->info), "%s", + tx_offload_map[i].output); + ret = 0; + break; + } + } + return ret; +} + static int dpaa2_alloc_rx_tx_queues(struct rte_eth_dev *dev) { @@ -438,7 +509,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) int rx_l4_csum_offload = false; int tx_l3_csum_offload = false; int tx_l4_csum_offload = false; - int ret; + int ret, tc_index; PMD_INIT_FUNC_TRACE(); @@ -478,12 +549,16 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) } if (eth_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) { - ret = dpaa2_setup_flow_dist(dev, - eth_conf->rx_adv_conf.rss_conf.rss_hf); - if (ret) { - DPAA2_PMD_ERR("Unable to set flow distribution." - "Check queue config"); - return ret; + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_setup_flow_dist(dev, + eth_conf->rx_adv_conf.rss_conf.rss_hf, + tc_index); + if (ret) { + DPAA2_PMD_ERR( + "Unable to set flow distribution on tc%d." 
+ "Check queue config", tc_index); + return ret; + } } } @@ -509,8 +584,10 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) return ret; } +#if !defined(RTE_LIBRTE_IEEE1588) if (rx_offloads & DEV_RX_OFFLOAD_TIMESTAMP) - dpaa2_enable_ts = true; +#endif + dpaa2_enable_ts[dev->data->port_id] = true; if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM) tx_l3_csum_offload = true; @@ -552,9 +629,6 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev) if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK); - /* update the current status */ - dpaa2_dev_link_update(dev, 0); - return 0; } @@ -566,7 +640,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id, uint16_t nb_rx_desc, unsigned int socket_id __rte_unused, - const struct rte_eth_rxconf *rx_conf __rte_unused, + const struct rte_eth_rxconf *rx_conf, struct rte_mempool *mb_pool) { struct dpaa2_dev_priv *priv = dev->data->dev_private; @@ -583,6 +657,13 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, DPAA2_PMD_DEBUG("dev =%p, queue =%d, pool = %p, conf =%p", dev, rx_queue_id, mb_pool, rx_conf); + /* Rx deferred start is not supported */ + if (rx_conf->rx_deferred_start) { + DPAA2_PMD_ERR("%p:Rx deferred start not supported", + (void *)dev); + return -EINVAL; + } + if (!priv->bp_list || priv->bp_list->mp != mb_pool) { bpid = mempool_to_bpid(mb_pool); ret = dpaa2_attach_bp_list(priv, @@ -593,6 +674,8 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, dpaa2_q = (struct dpaa2_queue *)priv->rx_vq[rx_queue_id]; dpaa2_q->mb_pool = mb_pool; /**< mbuf pool to populate RX ring. */ dpaa2_q->bp_array = rte_dpaa2_bpid_info; + dpaa2_q->nb_desc = UINT16_MAX; + dpaa2_q->offloads = rx_conf->offloads; /*Get the flow id from given VQ id*/ flow_id = dpaa2_q->flow_id; @@ -645,7 +728,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, struct dpni_taildrop taildrop; taildrop.enable = 1; - + dpaa2_q->nb_desc = nb_rx_desc; /* Private CGR will use tail drop length as nb_rx_desc. * for rest cases we can use standard byte based tail drop. 
* There is no HW restriction, but number of CGRs are limited, @@ -662,7 +745,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, dpaa2_q->tc_index, - flow_id, &taildrop); + dpaa2_q->cgid, &taildrop); } else { /*enabling per rx queue congestion control */ taildrop.threshold = CONG_THRESHOLD_RX_BYTES_Q; @@ -689,7 +772,7 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_CONGESTION_GROUP, DPNI_QUEUE_RX, dpaa2_q->tc_index, - flow_id, &taildrop); + dpaa2_q->cgid, &taildrop); } else { ret = dpni_set_taildrop(dpni, CMD_PRI_LOW, priv->token, DPNI_CP_QUEUE, DPNI_QUEUE_RX, @@ -709,9 +792,9 @@ dpaa2_dev_rx_queue_setup(struct rte_eth_dev *dev, static int dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id, - uint16_t nb_tx_desc __rte_unused, + uint16_t nb_tx_desc, unsigned int socket_id __rte_unused, - const struct rte_eth_txconf *tx_conf __rte_unused) + const struct rte_eth_txconf *tx_conf) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *) @@ -728,6 +811,16 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, PMD_INIT_FUNC_TRACE(); + /* Tx deferred start is not supported */ + if (tx_conf->tx_deferred_start) { + DPAA2_PMD_ERR("%p:Tx deferred start not supported", + (void *)dev); + return -EINVAL; + } + + dpaa2_q->nb_desc = UINT16_MAX; + dpaa2_q->offloads = tx_conf->offloads; + /* Return if queue already configured */ if (dpaa2_q->flow_id != 0xffff) { dev->data->tx_queues[tx_queue_id] = dpaa2_q; @@ -741,11 +834,11 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, flow_id = 0; ret = dpni_set_queue(dpni, CMD_PRI_LOW, priv->token, DPNI_QUEUE_TX, - tc_id, flow_id, options, &tx_flow_cfg); + tc_id, flow_id, options, &tx_flow_cfg); if (ret) { DPAA2_PMD_ERR("Error in setting the tx flow: " - "tc_id=%d, flow=%d err=%d", - tc_id, flow_id, ret); + "tc_id=%d, flow=%d err=%d", + tc_id, flow_id, ret); return -1; } @@ -781,12 +874,14 @@ dpaa2_dev_tx_queue_setup(struct rte_eth_dev *dev, if (!(priv->flags & DPAA2_TX_CGR_OFF)) { struct dpni_congestion_notification_cfg cong_notif_cfg = {0}; + dpaa2_q->nb_desc = nb_tx_desc; + cong_notif_cfg.units = DPNI_CONGESTION_UNIT_FRAMES; - cong_notif_cfg.threshold_entry = CONG_ENTER_TX_THRESHOLD; + cong_notif_cfg.threshold_entry = nb_tx_desc; /* Notify that the queue is not congested when the data in * the queue is below this thershold. 
*/ - cong_notif_cfg.threshold_exit = CONG_EXIT_TX_THRESHOLD; + cong_notif_cfg.threshold_exit = nb_tx_desc - 24; cong_notif_cfg.message_ctx = 0; cong_notif_cfg.message_iova = (size_t)DPAA2_VADDR_TO_IOVA(dpaa2_q->cscn); @@ -884,12 +979,12 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) struct qbman_fq_query_np_rslt state; uint32_t frame_cnt = 0; - PMD_INIT_FUNC_TRACE(); - if (unlikely(!DPAA2_PER_LCORE_DPIO)) { ret = dpaa2_affine_qbman_swp(); if (ret) { - DPAA2_PMD_ERR("Failure in affining portal"); + DPAA2_PMD_ERR( + "Failed to allocate IO portal, tid: %d\n", + rte_gettid()); return -EINVAL; } } @@ -899,7 +994,7 @@ dpaa2_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id) if (qbman_fq_query_state(swp, dpaa2_q->fqid, &state) == 0) { frame_cnt = qbman_fq_state_frame_count(&state); - DPAA2_PMD_DEBUG("RX frame count for q(%d) is %u", + DPAA2_PMD_DP_DEBUG("RX frame count for q(%d) is %u", rx_queue_id, frame_cnt); } return frame_cnt; @@ -967,8 +1062,7 @@ dpaa2_interrupt_handler(void *param) clear = DPNI_IRQ_EVENT_LINK_CHANGED; dpaa2_dev_link_update(dev, 0); /* calling all the apps registered for link status event */ - _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, - NULL); + rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL); } out: ret = dpni_clear_irq_status(dpni, CMD_PRI_LOW, priv->token, @@ -1141,27 +1235,61 @@ dpaa2_dev_stop(struct rte_eth_dev *dev) rte_eth_linkstatus_set(dev, &link); } -static void +static int dpaa2_dev_close(struct rte_eth_dev *dev) { struct dpaa2_dev_priv *priv = dev->data->dev_private; struct fsl_mc_io *dpni = (struct fsl_mc_io *)dev->process_private; - int ret; + int i, ret; struct rte_eth_link link; PMD_INIT_FUNC_TRACE(); - dpaa2_flow_clean(dev); + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + if (!dpni) { + DPAA2_PMD_WARN("Already closed or not started"); + return -1; + } + + dpaa2_flow_clean(dev); /* Clean the device first */ ret = dpni_reset(dpni, CMD_PRI_LOW, priv->token); if (ret) { DPAA2_PMD_ERR("Failure cleaning dpni device: err=%d", ret); - return; + return -1; } memset(&link, 0, sizeof(link)); rte_eth_linkstatus_set(dev, &link); + + /* Free private queues memory */ + dpaa2_free_rx_tx_queues(dev); + /* Close the device at underlying layer*/ + ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); + if (ret) { + DPAA2_PMD_ERR("Failure closing dpni device with err code %d", + ret); + } + + /* Free the allocated memory for ethernet private data and dpni*/ + priv->hw = NULL; + dev->process_private = NULL; + rte_free(dpni); + + for (i = 0; i < MAX_TCS; i++) + rte_free((void *)(size_t)priv->extract.tc_extract_param[i]); + + if (priv->extract.qos_extract_param) + rte_free((void *)(size_t)priv->extract.qos_extract_param); + + dev->dev_ops = NULL; + dev->rx_pkt_burst = NULL; + dev->tx_pkt_burst = NULL; + + DPAA2_PMD_INFO("%s: netdev deleted", dev->data->name); + return 0; } static int @@ -1288,7 +1416,7 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; if (frame_size > RTE_ETHER_MAX_LEN) - dev->data->dev_conf.rxmode.offloads &= + dev->data->dev_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; else dev->data->dev_conf.rxmode.offloads &= @@ -1326,8 +1454,8 @@ dpaa2_dev_add_mac_addr(struct rte_eth_dev *dev, return -1; } - ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, - priv->token, addr->addr_bytes); + ret = dpni_add_mac_addr(dpni, CMD_PRI_LOW, priv->token, + addr->addr_bytes, 0, 0, 0); if (ret) DPAA2_PMD_ERR( "error: Adding the MAC ADDR failed: err = %d", ret); @@ -1756,6 
+1884,7 @@ dpaa2_dev_set_link_up(struct rte_eth_dev *dev) /* changing tx burst function to start enqueues */ dev->tx_pkt_burst = dpaa2_dev_tx; dev->data->dev_link.link_status = state.up; + dev->data->dev_link.link_speed = state.rate; if (state.up) DPAA2_PMD_INFO("Port %d Link is Up", dev->data->port_id); @@ -1969,22 +2098,31 @@ dpaa2_dev_rss_hash_update(struct rte_eth_dev *dev, struct rte_eth_rss_conf *rss_conf) { struct rte_eth_dev_data *data = dev->data; + struct dpaa2_dev_priv *priv = data->dev_private; struct rte_eth_conf *eth_conf = &data->dev_conf; - int ret; + int ret, tc_index; PMD_INIT_FUNC_TRACE(); if (rss_conf->rss_hf) { - ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf); - if (ret) { - DPAA2_PMD_ERR("Unable to set flow dist"); - return ret; + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_setup_flow_dist(dev, rss_conf->rss_hf, + tc_index); + if (ret) { + DPAA2_PMD_ERR("Unable to set flow dist on tc%d", + tc_index); + return ret; + } } } else { - ret = dpaa2_remove_flow_dist(dev, 0); - if (ret) { - DPAA2_PMD_ERR("Unable to remove flow dist"); - return ret; + for (tc_index = 0; tc_index < priv->num_rx_tc; tc_index++) { + ret = dpaa2_remove_flow_dist(dev, tc_index); + if (ret) { + DPAA2_PMD_ERR( + "Unable to remove flow dist on tc%d", + tc_index); + return ret; + } } } eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf; @@ -2006,7 +2144,7 @@ dpaa2_dev_rss_hash_conf_get(struct rte_eth_dev *dev, int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, int eth_rx_queue_id, - uint16_t dpcon_id, + struct dpaa2_dpcon_dev *dpcon, const struct rte_event_eth_rx_adapter_queue_conf *queue_conf) { struct dpaa2_dev_priv *eth_priv = dev->data->dev_private; @@ -2014,7 +2152,7 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, struct dpaa2_queue *dpaa2_ethq = eth_priv->rx_vq[eth_rx_queue_id]; uint8_t flow_id = dpaa2_ethq->flow_id; struct dpni_queue cfg; - uint8_t options; + uint8_t options, priority; int ret; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL) @@ -2026,11 +2164,14 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev, else return -EINVAL; + priority = (RTE_EVENT_DEV_PRIORITY_LOWEST / queue_conf->ev.priority) * + (dpcon->num_priorities - 1); + memset(&cfg, 0, sizeof(struct dpni_queue)); options = DPNI_QUEUE_OPT_DEST; cfg.destination.type = DPNI_DEST_DPCON; - cfg.destination.id = dpcon_id; - cfg.destination.priority = queue_conf->ev.priority; + cfg.destination.id = dpcon->dpcon_id; + cfg.destination.priority = priority; if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_ATOMIC) { options |= DPNI_QUEUE_OPT_HOLD_ACTIVE; @@ -2151,6 +2292,43 @@ dpaa2_dev_flow_ctrl(struct rte_eth_dev *dev, return ret; } +static void +dpaa2_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_rxq_info *qinfo) +{ + struct dpaa2_queue *rxq; + + rxq = (struct dpaa2_queue *)dev->data->rx_queues[queue_id]; + + qinfo->mp = rxq->mb_pool; + qinfo->scattered_rx = dev->data->scattered_rx; + qinfo->nb_desc = rxq->nb_desc; + + qinfo->conf.rx_free_thresh = 1; + qinfo->conf.rx_drop_en = 1; + qinfo->conf.rx_deferred_start = 0; + qinfo->conf.offloads = rxq->offloads; +} + +static void +dpaa2_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, + struct rte_eth_txq_info *qinfo) +{ + struct dpaa2_queue *txq; + + txq = dev->data->tx_queues[queue_id]; + + qinfo->nb_desc = txq->nb_desc; + qinfo->conf.tx_thresh.pthresh = 0; + qinfo->conf.tx_thresh.hthresh = 0; + qinfo->conf.tx_thresh.wthresh = 0; + + qinfo->conf.tx_free_thresh = 0; + 
qinfo->conf.tx_rs_thresh = 0; + qinfo->conf.offloads = txq->offloads; + qinfo->conf.tx_deferred_start = 0; +} + static struct eth_dev_ops dpaa2_ethdev_ops = { .dev_configure = dpaa2_eth_dev_configure, .dev_start = dpaa2_dev_start, @@ -2181,7 +2359,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = { .rx_queue_release = dpaa2_dev_rx_queue_release, .tx_queue_setup = dpaa2_dev_tx_queue_setup, .tx_queue_release = dpaa2_dev_tx_queue_release, - .rx_queue_count = dpaa2_dev_rx_queue_count, + .rx_burst_mode_get = dpaa2_dev_rx_burst_mode_get, + .tx_burst_mode_get = dpaa2_dev_tx_burst_mode_get, .flow_ctrl_get = dpaa2_flow_ctrl_get, .flow_ctrl_set = dpaa2_flow_ctrl_set, .mac_addr_add = dpaa2_dev_add_mac_addr, @@ -2190,6 +2369,8 @@ static struct eth_dev_ops dpaa2_ethdev_ops = { .rss_hash_update = dpaa2_dev_rss_hash_update, .rss_hash_conf_get = dpaa2_dev_rss_hash_conf_get, .filter_ctrl = dpaa2_dev_flow_ctrl, + .rxq_info_get = dpaa2_rxq_info_get, + .txq_info_get = dpaa2_txq_info_get, #if defined(RTE_LIBRTE_IEEE1588) .timesync_enable = dpaa2_timesync_enable, .timesync_disable = dpaa2_timesync_disable, @@ -2325,7 +2506,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) DPAA2_PMD_ERR("Memory allocation failed for dpni device"); return -1; } - dpni_dev->regs = rte_mcp_ptr_list[0]; + dpni_dev->regs = dpaa2_get_mcp_ptr(MC_PORTAL_INDEX); eth_dev->process_private = (void *)dpni_dev; /* For secondary processes, the primary has done all the work */ @@ -2334,6 +2515,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) * plugged. */ eth_dev->dev_ops = &dpaa2_ethdev_ops; + eth_dev->rx_queue_count = dpaa2_dev_rx_queue_count; if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx; else if (dpaa2_get_devargs(dev->devargs, @@ -2374,6 +2556,10 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) } priv->num_rx_tc = attr.num_rx_tcs; + priv->qos_entries = attr.qos_entries; + priv->fs_entries = attr.fs_entries; + priv->dist_queues = attr.num_queues; + /* only if the custom CG is enabled */ if (attr.options & DPNI_OPT_CUSTOM_CG) priv->max_cgs = attr.num_cgs; @@ -2483,23 +2669,41 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = dpaa2_dev_tx; /*Init fields w.r.t. 
classficaition*/ - memset(&priv->extract.qos_key_cfg, 0, sizeof(struct dpkg_profile_cfg)); + memset(&priv->extract.qos_key_extract, 0, + sizeof(struct dpaa2_key_extract)); priv->extract.qos_extract_param = (size_t)rte_malloc(NULL, 256, 64); if (!priv->extract.qos_extract_param) { DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow " " classificaiton ", ret); goto init_err; } + priv->extract.qos_key_extract.key_info.ipv4_src_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.qos_key_extract.key_info.ipv4_dst_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.qos_key_extract.key_info.ipv6_src_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.qos_key_extract.key_info.ipv6_dst_offset = + IP_ADDRESS_OFFSET_INVALID; + for (i = 0; i < MAX_TCS; i++) { - memset(&priv->extract.fs_key_cfg[i], 0, - sizeof(struct dpkg_profile_cfg)); - priv->extract.fs_extract_param[i] = + memset(&priv->extract.tc_key_extract[i], 0, + sizeof(struct dpaa2_key_extract)); + priv->extract.tc_extract_param[i] = (size_t)rte_malloc(NULL, 256, 64); - if (!priv->extract.fs_extract_param[i]) { + if (!priv->extract.tc_extract_param[i]) { DPAA2_PMD_ERR(" Error(%d) in allocation resources for flow classificaiton", ret); goto init_err; } + priv->extract.tc_key_extract[i].key_info.ipv4_src_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.tc_key_extract[i].key_info.ipv4_dst_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.tc_key_extract[i].key_info.ipv6_src_offset = + IP_ADDRESS_OFFSET_INVALID; + priv->extract.tc_key_extract[i].key_info.ipv6_dst_offset = + IP_ADDRESS_OFFSET_INVALID; } ret = dpni_set_max_frame_length(dpni_dev, CMD_PRI_LOW, priv->token, @@ -2536,58 +2740,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev) RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name); return 0; init_err: - dpaa2_dev_uninit(eth_dev); - return ret; -} - -static int -dpaa2_dev_uninit(struct rte_eth_dev *eth_dev) -{ - struct dpaa2_dev_priv *priv = eth_dev->data->dev_private; - struct fsl_mc_io *dpni = (struct fsl_mc_io *)eth_dev->process_private; - int i, ret; - - PMD_INIT_FUNC_TRACE(); - - if (rte_eal_process_type() != RTE_PROC_PRIMARY) - return 0; - - if (!dpni) { - DPAA2_PMD_WARN("Already closed or not started"); - return -1; - } - dpaa2_dev_close(eth_dev); - dpaa2_free_rx_tx_queues(eth_dev); - - /* Close the device at underlying layer*/ - ret = dpni_close(dpni, CMD_PRI_LOW, priv->token); - if (ret) { - DPAA2_PMD_ERR( - "Failure closing dpni device with err code %d", - ret); - } - - /* Free the allocated memory for ethernet private data and dpni*/ - priv->hw = NULL; - eth_dev->process_private = NULL; - rte_free(dpni); - - for (i = 0; i < MAX_TCS; i++) { - if (priv->extract.fs_extract_param[i]) - rte_free((void *)(size_t)priv->extract.fs_extract_param[i]); - } - - if (priv->extract.qos_extract_param) - rte_free((void *)(size_t)priv->extract.qos_extract_param); - - eth_dev->dev_ops = NULL; - eth_dev->rx_pkt_burst = NULL; - eth_dev->tx_pkt_burst = NULL; - - DPAA2_PMD_INFO("%s: netdev deleted", eth_dev->data->name); - return 0; + return ret; } static int @@ -2656,13 +2811,13 @@ static int rte_dpaa2_remove(struct rte_dpaa2_device *dpaa2_dev) { struct rte_eth_dev *eth_dev; + int ret; eth_dev = dpaa2_dev->eth_dev; - dpaa2_dev_uninit(eth_dev); - - rte_eth_dev_release_port(eth_dev); + dpaa2_dev_close(eth_dev); + ret = rte_eth_dev_release_port(eth_dev); - return 0; + return ret; } static struct rte_dpaa2_driver rte_dpaa2_pmd = { @@ -2676,9 +2831,4 @@ RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd); 
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2, DRIVER_LOOPBACK_MODE "= " DRIVER_NO_PREFETCH_MODE "="); -RTE_INIT(dpaa2_pmd_init_log) -{ - dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2"); - if (dpaa2_logtype_pmd >= 0) - rte_log_set_level(dpaa2_logtype_pmd, RTE_LOG_NOTICE); -} +RTE_LOG_REGISTER(dpaa2_logtype_pmd, pmd.net.dpaa2, NOTICE);
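
Note (not part of the patch above): besides the per-TC RSS and taildrop changes, this patch wires the dpaa2 PMD into the generic ethdev introspection hooks (rx/tx_burst_mode_get, rxq_info_get/txq_info_get) and makes the Rx/Tx descriptor counts and offloads visible per queue. A minimal usage sketch of how an application could read that information back through the standard ethdev API follows; it assumes rte_eal_init() has run and that port_id/queue_id refer to a configured dpaa2 port, and is only an illustration, not code from this commit.

    #include <stdio.h>
    #include <inttypes.h>
    #include <rte_ethdev.h>

    static void
    show_rx_queue_info(uint16_t port_id, uint16_t queue_id)
    {
            struct rte_eth_burst_mode mode;
            struct rte_eth_rxq_info qinfo;

            /* Filled by dpaa2_dev_rx_burst_mode_get(): first enabled Rx offload */
            if (rte_eth_rx_burst_mode_get(port_id, queue_id, &mode) == 0)
                    printf("port %u rxq %u burst mode:%s\n",
                           port_id, queue_id, mode.info);

            /* Filled by dpaa2_rxq_info_get(): mempool, ring size, offloads */
            if (rte_eth_rx_queue_info_get(port_id, queue_id, &qinfo) == 0)
                    printf("port %u rxq %u: nb_desc=%u scattered=%u offloads=0x%" PRIx64 "\n",
                           port_id, queue_id, qinfo.nb_desc,
                           qinfo.scattered_rx, qinfo.conf.offloads);
    }

The reported nb_desc comes from the nb_rx_desc/nb_tx_desc values cached in dpaa2_queue during queue setup in this patch; queues configured before a taildrop/CGR is programmed report UINT16_MAX, as initialised above.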