#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
+#include <dpaa_flow.h>
#include <rte_pmd_dpaa.h>
#include <fsl_usd.h>
#include <fsl_bman.h>
#include <fsl_fman.h>
#include <process.h>
+#include <fmlib/fm_ext.h>
+
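+/* Interval and bound for polling the link state in dpaa_eth_link_update() */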
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_REPEAT_TIME 90 /* 9s (90 * 100ms) in total */
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
+static int fmc_q = 1; /* Indicates the use of a static FMC configuration for distribution */
static int default_q; /* use default queue - FMC is not executed */
/* At present we only allow up to 4 push mode queues as default - as each of
 * these queues needs a dedicated portal and we are short of portals.
return -EINVAL;
}
- if (frame_size > RTE_ETHER_MAX_LEN)
+ if (frame_size > DPAA_ETH_MAX_LEN)
dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
uint64_t rx_offloads = eth_conf->rxmode.offloads;
uint64_t tx_offloads = eth_conf->txmode.offloads;
struct rte_device *rdev = dev->device;
+ struct rte_eth_link *link = &dev->data->dev_link;
struct rte_dpaa_device *dpaa_dev;
struct fman_if *fif = dev->process_private;
struct __fman_if *__fif;
struct rte_intr_handle *intr_handle;
+ int speed, duplex;
int ret;
PMD_INIT_FUNC_TRACE();
dev->data->scattered_rx = 1;
}
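+
+ /* FMC-less mode (neither the default queue nor a static FMC
+ * configuration is in use): program the FM port for RSS
+ * distribution and write the resulting FM configuration to file.
+ */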
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_config(dev,
+ eth_conf->rx_adv_conf.rss_conf.rss_hf)) {
+ dpaa_write_fm_config_to_file();
+ DPAA_PMD_ERR("FM port configuration: Failed\n");
+ return -1;
+ }
+ dpaa_write_fm_config_to_file();
+ }
+
/* if the interrupts were configured on this device */
if (intr_handle && intr_handle->fd) {
if (dev->data->dev_conf.intr_conf.lsc != 0)
dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
}
}
+
+ /* Wait for link status to get updated */
+ if (!link->link_status)
+ sleep(1);
+
+ /* Configure link only if link is UP */
+ if (link->link_status) {
+ if (eth_conf->link_speeds == ETH_LINK_SPEED_AUTONEG) {
+ /* Start autoneg only if link is not in autoneg mode */
+ if (!link->link_autoneg)
+ dpaa_restart_link_autoneg(__fif->node_name);
+ } else if (eth_conf->link_speeds & ETH_LINK_SPEED_FIXED) {
+ switch (eth_conf->link_speeds & ~ETH_LINK_SPEED_FIXED) {
+ case ETH_LINK_SPEED_10M_HD:
+ speed = ETH_SPEED_NUM_10M;
+ duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_10M:
+ speed = ETH_SPEED_NUM_10M;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_100M_HD:
+ speed = ETH_SPEED_NUM_100M;
+ duplex = ETH_LINK_HALF_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_100M:
+ speed = ETH_SPEED_NUM_100M;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_1G:
+ speed = ETH_SPEED_NUM_1G;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_2_5G:
+ speed = ETH_SPEED_NUM_2_5G;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ case ETH_LINK_SPEED_10G:
+ speed = ETH_SPEED_NUM_10G;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ default:
+ speed = ETH_SPEED_NUM_NONE;
+ duplex = ETH_LINK_FULL_DUPLEX;
+ break;
+ }
+ /* Set link speed */
+ dpaa_update_link_speed(__fif->node_name, speed, duplex);
+ } else {
+ /* Manual autoneg - custom advertisement speeds. */
+ DPAA_PMD_WARN("Custom advertisement speeds not supported");
+ }
+ }
+
return 0;
}
if (bytes_read < 0)
DPAA_PMD_ERR("Error reading eventfd\n");
dpaa_eth_link_update(dev, 0);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
static int dpaa_eth_dev_start(struct rte_eth_dev *dev)
PMD_INIT_FUNC_TRACE();
+ if (!(default_q || fmc_q))
+ dpaa_write_fm_config_to_file();
+
/* Change tx callback to the real one */
if (dpaa_intf->cgr_tx)
dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
return 0;
}
-static void dpaa_eth_dev_stop(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_stop(struct rte_eth_dev *dev)
{
struct fman_if *fif = dev->process_private;
PMD_INIT_FUNC_TRACE();
+ dev->data->dev_started = 0;
- fman_if_disable_rx(fif);
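+ /* On a shared-MAC interface the kernel also uses the port, so
+ * leave Rx enabled; otherwise quiesce the FMan Rx side.
+ */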
+ if (!fif->is_shared_mac)
+ fman_if_disable_rx(fif);
dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
+
+ return 0;
}
-static void dpaa_eth_dev_close(struct rte_eth_dev *dev)
+static int dpaa_eth_dev_close(struct rte_eth_dev *dev)
{
struct fman_if *fif = dev->process_private;
struct __fman_if *__fif;
struct rte_device *rdev = dev->device;
struct rte_dpaa_device *dpaa_dev;
struct rte_intr_handle *intr_handle;
+ struct rte_eth_link *link = &dev->data->dev_link;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ int loop;
+ int ret;
PMD_INIT_FUNC_TRACE();
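+ /* Cleanup is owned by the primary process; secondary processes
+ * share this device state and must not free it.
+ */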
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (!dpaa_intf) {
+ DPAA_PMD_WARN("Already closed or not started");
+ return -1;
+ }
+
+ /* DPAA FM deconfig */
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_deconfig(dpaa_intf, dev->process_private))
+ DPAA_PMD_WARN("DPAA FM deconfig failed\n");
+ }
+
dpaa_dev = container_of(rdev, struct rte_dpaa_device, device);
intr_handle = &dpaa_dev->intr_handle;
__fif = container_of(fif, struct __fman_if, __if);
- dpaa_eth_dev_stop(dev);
+ ret = dpaa_eth_dev_stop(dev);
+
+ /* Reset link to autoneg */
+ if (link->link_status && !link->link_autoneg)
+ dpaa_restart_link_autoneg(__fif->node_name);
if (intr_handle && intr_handle->fd &&
dev->data->dev_conf.intr_conf.lsc != 0) {
dpaa_interrupt_handler,
(void *)dev);
}
+
+ /* release configuration memory */
+ if (dpaa_intf->fc_conf)
+ rte_free(dpaa_intf->fc_conf);
+
+ /* Release RX congestion Groups */
+ if (dpaa_intf->cgr_rx) {
+ for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
+ qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
+ }
+
+ rte_free(dpaa_intf->cgr_rx);
+ dpaa_intf->cgr_rx = NULL;
+ /* Release TX congestion Groups */
+ if (dpaa_intf->cgr_tx) {
+ for (loop = 0; loop < MAX_DPAA_CORES; loop++)
+ qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
+ rte_free(dpaa_intf->cgr_tx);
+ dpaa_intf->cgr_tx = NULL;
+ }
+
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+
+ return ret;
}
static int
ret = snprintf(fw_version, fw_size, "SVR:%x-fman-v%x",
svr_ver, fman_ip_rev);
- ret += 1; /* add the size of '\0' */
- if (fw_size < (uint32_t)ret)
+ if (ret < 0)
+ return -EINVAL;
+ ret += 1; /* add the size of '\0' */
+ if (fw_size < (size_t)ret)
return ret;
else
return 0;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
if (fif->mac_type == fman_mac_1g) {
- dev_info->speed_capa = ETH_LINK_SPEED_1G;
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+ | ETH_LINK_SPEED_10M
+ | ETH_LINK_SPEED_100M_HD
+ | ETH_LINK_SPEED_100M
+ | ETH_LINK_SPEED_1G;
} else if (fif->mac_type == fman_mac_2_5g) {
- dev_info->speed_capa = ETH_LINK_SPEED_1G
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+ | ETH_LINK_SPEED_10M
+ | ETH_LINK_SPEED_100M_HD
+ | ETH_LINK_SPEED_100M
+ | ETH_LINK_SPEED_1G
| ETH_LINK_SPEED_2_5G;
} else if (fif->mac_type == fman_mac_10g) {
- dev_info->speed_capa = ETH_LINK_SPEED_1G
+ dev_info->speed_capa = ETH_LINK_SPEED_10M_HD
+ | ETH_LINK_SPEED_10M
+ | ETH_LINK_SPEED_100M_HD
+ | ETH_LINK_SPEED_100M
+ | ETH_LINK_SPEED_1G
| ETH_LINK_SPEED_2_5G
| ETH_LINK_SPEED_10G;
} else {
}
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
- int wait_to_complete __rte_unused)
+ int wait_to_complete)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct rte_eth_link *link = &dev->data->dev_link;
struct fman_if *fif = dev->process_private;
struct __fman_if *__fif = container_of(fif, struct __fman_if, __if);
- int ret;
+ int ret, ioctl_version;
+ uint8_t count;
PMD_INIT_FUNC_TRACE();
- if (fif->mac_type == fman_mac_1g)
- link->link_speed = ETH_SPEED_NUM_1G;
- else if (fif->mac_type == fman_mac_2_5g)
- link->link_speed = ETH_SPEED_NUM_2_5G;
- else if (fif->mac_type == fman_mac_10g)
- link->link_speed = ETH_SPEED_NUM_10G;
- else
- DPAA_PMD_ERR("invalid link_speed: %s, %d",
- dpaa_intf->name, fif->mac_type);
+ ioctl_version = dpaa_get_ioctl_version_number();
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) {
- ret = dpaa_get_link_status(__fif->node_name);
- if (ret < 0)
- return ret;
- link->link_status = ret;
+ for (count = 0; count <= MAX_REPEAT_TIME; count++) {
+ ret = dpaa_get_link_status(__fif->node_name, link);
+ if (ret)
+ return ret;
+ if (link->link_status == ETH_LINK_DOWN &&
+ wait_to_complete)
+ rte_delay_ms(CHECK_INTERVAL);
+ else
+ break;
+ }
} else {
link->link_status = dpaa_intf->valid;
}
- link->link_duplex = ETH_LINK_FULL_DUPLEX;
- link->link_autoneg = ETH_LINK_AUTONEG;
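+ /* With an older ioctl interface (< 2) the kernel cannot report
+ * speed/duplex, so derive them from the MAC type; newer versions
+ * fill in the link structure from dpaa_get_link_status().
+ */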
+ if (ioctl_version < 2) {
+ link->link_duplex = ETH_LINK_FULL_DUPLEX;
+ link->link_autoneg = ETH_LINK_AUTONEG;
+
+ if (fif->mac_type == fman_mac_1g)
+ link->link_speed = ETH_SPEED_NUM_1G;
+ else if (fif->mac_type == fman_mac_2_5g)
+ link->link_speed = ETH_SPEED_NUM_2_5G;
+ else if (fif->mac_type == fman_mac_10g)
+ link->link_speed = ETH_SPEED_NUM_10G;
+ else
+ DPAA_PMD_ERR("invalid link_speed: %s, %d",
+ dpaa_intf->name, fif->mac_type);
+ }
DPAA_PMD_INFO("Port %d Link is %s\n", dev->data->port_id,
link->link_status ? "Up" : "Down");
return 0;
}
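+/* Program the FMan interface from the device's mempool: internal context
+ * parameters, the frame descriptor offset, and the buffer pool id and size
+ * (the pool size must equal the mbuf dataroom size).
+ */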
+static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct fman_if_ic_params icp;
+ uint32_t fd_offset;
+ uint32_t bp_size;
+
+ memset(&icp, 0, sizeof(icp));
+ /* Set ICEOF to the default value, which is 0 */
+ icp.iciof = DEFAULT_ICIOF;
+ icp.iceof = DEFAULT_RX_ICEOF;
+ icp.icsz = DEFAULT_ICSZ;
+ fman_if_set_ic_params(dev->process_private, &icp);
+
+ fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+ fman_if_set_fdoff(dev->process_private, fd_offset);
+
+ /* Buffer pool size should be equal to the dataroom size */
+ bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
+
+ fman_if_set_bp(dev->process_private,
+ dpaa_intf->bp_info->mp->size,
+ dpaa_intf->bp_info->bpid, bp_size);
+}
+
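+/* With virtual storage profiles (VSP), each profile maps to exactly one
+ * buffer pool; reject an Rx queue that tries to bind a second mempool to a
+ * profile that already has one.
+ */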
+static inline int dpaa_eth_rx_queue_bp_check(struct rte_eth_dev *dev,
+ int8_t vsp_id, uint32_t bpid)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct fman_if *fif = dev->process_private;
+
+ if (fif->num_profiles) {
+ if (vsp_id < 0)
+ vsp_id = fif->base_profile_id;
+ } else {
+ if (vsp_id < 0)
+ vsp_id = 0;
+ }
+
+ if (dpaa_intf->vsp_bpid[vsp_id] &&
+ bpid != dpaa_intf->vsp_bpid[vsp_id]) {
+ DPAA_PMD_ERR("Various MPs are assigned to RXQs with same VSP");
+
+ return -1;
+ }
+
+ return 0;
+}
+
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id __rte_unused,
- const struct rte_eth_rxconf *rx_conf __rte_unused,
+ const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
return -rte_errno;
}
+ /* Rx deferred start is not supported */
+ if (rx_conf->rx_deferred_start) {
+ DPAA_PMD_ERR("%p:Rx deferred start not supported", (void *)dev);
+ return -EINVAL;
+ }
+ rxq->nb_desc = UINT16_MAX;
+ rxq->offloads = rx_conf->offloads;
+
DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
queue_idx, rxq->fqid);
+ if (!fif->num_profiles) {
+ if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+ dpaa_intf->bp_info->mp != mp) {
+ DPAA_PMD_WARN("Multiple pools on same interface not"
+ " supported");
+ return -EINVAL;
+ }
+ } else {
+ if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+ return -EINVAL;
+ }
+ }
+
/* Max packet can fit in single buffer */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
;
buffsz - RTE_PKTMBUF_HEADROOM);
}
- if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
- struct fman_if_ic_params icp;
- uint32_t fd_offset;
- uint32_t bp_size;
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
- if (!mp->pool_data) {
- DPAA_PMD_ERR("Not an offloaded buffer pool!");
- return -1;
+ /* For a shared interface this is done in the kernel, skip it. */
+ if (!fif->is_shared_mac)
+ dpaa_fman_if_pool_setup(dev);
+
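+ /* Bind this queue's VSP to the mempool's bpid. A queue without a
+ * VSP uses the base profile, which is not allowed on a shared-MAC
+ * interface.
+ */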
+ if (fif->num_profiles) {
+ int8_t vsp_id = rxq->vsp_id;
+
+ if (vsp_id >= 0) {
+ ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+ fif);
+ if (ret) {
+ DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+ return ret;
+ }
+ } else {
+ DPAA_PMD_INFO("Base profile is associated to"
+ " RXQ fqid:%d\r\n", rxq->fqid);
+ if (fif->is_shared_mac) {
+ DPAA_PMD_ERR("Fatal: Base profile is associated"
+ " to shared interface on DPDK.");
+ return -EINVAL;
+ }
+ dpaa_intf->vsp_bpid[fif->base_profile_id] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
}
- dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
-
- memset(&icp, 0, sizeof(icp));
- /* set ICEOF for to the default value , which is 0*/
- icp.iciof = DEFAULT_ICIOF;
- icp.iceof = DEFAULT_RX_ICEOF;
- icp.icsz = DEFAULT_ICSZ;
- fman_if_set_ic_params(fif, &icp);
-
- fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
- fman_if_set_fdoff(fif, fd_offset);
-
- /* Buffer pool size should be equal to Dataroom Size*/
- bp_size = rte_pktmbuf_data_room_size(mp);
- fman_if_set_bp(fif, mp->size,
- dpaa_intf->bp_info->bpid, bp_size);
- dpaa_intf->valid = 1;
- DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
- dpaa_intf->name, fd_offset,
- fman_if_get_fdoff(fif));
+ } else {
+ dpaa_intf->vsp_bpid[0] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
}
+
+ dpaa_intf->valid = 1;
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
fman_if_get_sg_enable(fif),
dev->data->dev_conf.rxmode.max_rx_pkt_len);
if (dpaa_intf->cgr_rx) {
struct qm_mcc_initcgr cgr_opts = {0};
+ rxq->nb_desc = nb_desc;
/* Enable tail drop with cgr on this queue */
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
ret = qman_modify_cgr(dpaa_intf->cgr_rx, 0, &cgr_opts);
rxq->fqid, ret);
}
}
-
+ /* Also make the main queue receive error packets by default */
+ fman_if_set_err_fqid(fif, rxq->fqid);
return 0;
}
int dpaa_eth_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc __rte_unused,
unsigned int socket_id __rte_unused,
- const struct rte_eth_txconf *tx_conf __rte_unused)
+ const struct rte_eth_txconf *tx_conf)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *txq = &dpaa_intf->tx_queues[queue_idx];
PMD_INIT_FUNC_TRACE();
+ /* Tx deferred start is not supported */
+ if (tx_conf->tx_deferred_start) {
+ DPAA_PMD_ERR("%p:Tx deferred start not supported", (void *)dev);
+ return -EINVAL;
+ }
+ txq->nb_desc = UINT16_MAX;
+ txq->offloads = tx_conf->offloads;
+
if (queue_idx >= dev->data->nb_tx_queues) {
rte_errno = EOVERFLOW;
DPAA_PMD_ERR("%p: queue index out of range (%u >= %u)",
}
DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
- queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
- dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+ queue_idx, txq->fqid);
+ dev->data->tx_queues[queue_idx] = txq;
return 0;
}
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
dpaa_update_link_status(__fif->node_name, ETH_LINK_DOWN);
else
- dpaa_eth_dev_stop(dev);
+ return dpaa_eth_dev_stop(dev);
return 0;
}
return ret;
}
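+/* Updating the RSS hash reprograms the FM port distribution, which is only
+ * possible in FMC-less mode; default-queue and static FMC setups are fixed.
+ */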
+static int
+dpaa_dev_rss_hash_update(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_config(dev, rss_conf->rss_hf)) {
+ DPAA_PMD_ERR("FM port configuration: Failed\n");
+ return -1;
+ }
+ eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_conf->rss_hf;
+ } else {
+ DPAA_PMD_ERR("Function not supported\n");
+ return -ENOTSUP;
+ }
+ return 0;
+}
+
+static int
+dpaa_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev_data *data = dev->data;
+ struct rte_eth_conf *eth_conf = &data->dev_conf;
+
+ /* dpaa does not support rss_key, so the length should be 0 */
+ rss_conf->rss_key_len = 0;
+ rss_conf->rss_hf = eth_conf->rx_adv_conf.rss_conf.rss_hf;
+ return 0;
+}
+
static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
uint16_t queue_id)
{
return 0;
}
+static void
+dpaa_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_rxq_info *qinfo)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq;
+ int ret;
+
+ rxq = dev->data->rx_queues[queue_id];
+
+ qinfo->mp = dpaa_intf->bp_info->mp;
+ qinfo->scattered_rx = dev->data->scattered_rx;
+ qinfo->nb_desc = rxq->nb_desc;
+
+ /* Report the HW Rx buffer length to user */
+ ret = fman_if_get_maxfrm(dev->process_private);
+ if (ret > 0)
+ qinfo->rx_buf_size = ret;
+
+ qinfo->conf.rx_free_thresh = 1;
+ qinfo->conf.rx_drop_en = 1;
+ qinfo->conf.rx_deferred_start = 0;
+ qinfo->conf.offloads = rxq->offloads;
+}
+
+static void
+dpaa_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
+ struct rte_eth_txq_info *qinfo)
+{
+ struct qman_fq *txq;
+
+ txq = dev->data->tx_queues[queue_id];
+
+ qinfo->nb_desc = txq->nb_desc;
+ qinfo->conf.tx_thresh.pthresh = 0;
+ qinfo->conf.tx_thresh.hthresh = 0;
+ qinfo->conf.tx_thresh.wthresh = 0;
+
+ qinfo->conf.tx_free_thresh = 0;
+ qinfo->conf.tx_rs_thresh = 0;
+ qinfo->conf.offloads = txq->offloads;
+ qinfo->conf.tx_deferred_start = 0;
+}
+
static struct eth_dev_ops dpaa_devops = {
.dev_configure = dpaa_eth_dev_configure,
.dev_start = dpaa_eth_dev_start,
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
- .rx_queue_count = dpaa_dev_rx_queue_count,
.rx_burst_mode_get = dpaa_dev_rx_burst_mode_get,
.tx_burst_mode_get = dpaa_dev_tx_burst_mode_get,
+ .rxq_info_get = dpaa_rxq_info_get,
+ .txq_info_get = dpaa_txq_info_get,
+
.flow_ctrl_get = dpaa_flow_ctrl_get,
.flow_ctrl_set = dpaa_flow_ctrl_set,
.rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
.rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
+ .rss_hash_update = dpaa_dev_rss_hash_update,
+ .rss_hash_conf_get = dpaa_dev_rss_hash_conf_get,
};
static bool
}
int
-rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+rte_pmd_dpaa_set_tx_loopback(uint16_t port, uint8_t on)
{
struct rte_eth_dev *dev;
}
};
- if (fqid) {
+ if (fmc_q || default_q) {
ret = qman_reserve_fqid(fqid);
if (ret) {
- DPAA_PMD_ERR("reserve rx fqid 0x%x failed with ret: %d",
+ DPAA_PMD_ERR("reserve rx fqid 0x%x failed, ret: %d",
fqid, ret);
return -EINVAL;
}
- } else {
- flags |= QMAN_FQ_FLAG_DYNAMIC_FQID;
}
+
DPAA_PMD_DEBUG("creating rx fq %p, fqid 0x%x", fq, fqid);
ret = qman_create_fq(fqid, flags, fq);
if (ret) {
struct fman_if_bpool *bp, *tmp_bp;
uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
uint32_t cgrid_tx[MAX_DPAA_CORES];
- char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];
+ uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
+ int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
+ int8_t vsp_id = -1;
PMD_INIT_FUNC_TRACE();
dpaa_intf->ifid = dev_id;
dpaa_intf->cfg = cfg;
+ memset(dev_rx_fqids, 0, sizeof(dev_rx_fqids));
+
+ memset(dev_vspids, -1, sizeof(dev_vspids));
+
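+ /* Rx FQ ids come from one of three modes: a single default queue,
+ * a static FMC configuration (fmc_q), or FMC-less mode, where one
+ * queue per lcore is allocated dynamically from QMan below.
+ */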
/* Initialize Rx FQ's */
if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ } else if (fmc_q) {
+ num_rx_fqs = dpaa_port_fmc_init(fman_intf, dev_rx_fqids,
+ dev_vspids,
+ DPAA_MAX_NUM_PCD_QUEUES);
+ if (num_rx_fqs < 0) {
+ DPAA_PMD_ERR("%s FMC initializes failed!",
+ dpaa_intf->name);
+ goto free_rx;
+ }
+ if (!num_rx_fqs) {
+ DPAA_PMD_WARN("%s is not configured by FMC.",
+ dpaa_intf->name);
+ }
} else {
- if (getenv("DPAA_NUM_RX_QUEUES"))
- num_rx_fqs = atoi(getenv("DPAA_NUM_RX_QUEUES"));
- else
- num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ /* FMC-less mode: load balance across multiple cores. */
+ num_rx_fqs = rte_lcore_count();
}
-
/* Each device can not have more than DPAA_MAX_NUM_PCD_QUEUES RX
* queues.
*/
- if (num_rx_fqs <= 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
+ if (num_rx_fqs < 0 || num_rx_fqs > DPAA_MAX_NUM_PCD_QUEUES) {
DPAA_PMD_ERR("Invalid number of RX queues\n");
return -EINVAL;
}
- dpaa_intf->rx_queues = rte_zmalloc(NULL,
- sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
- if (!dpaa_intf->rx_queues) {
- DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
- return -ENOMEM;
+ if (num_rx_fqs > 0) {
+ dpaa_intf->rx_queues = rte_zmalloc(NULL,
+ sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->rx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+ return -ENOMEM;
+ }
+ } else {
+ dpaa_intf->rx_queues = NULL;
}
memset(cgrid, 0, sizeof(cgrid));
}
/* If congestion control is enabled globally*/
- if (td_threshold) {
+ if (num_rx_fqs > 0 && td_threshold) {
dpaa_intf->cgr_rx = rte_zmalloc(NULL,
sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
if (!dpaa_intf->cgr_rx) {
dpaa_intf->cgr_rx = NULL;
}
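+ /* FMC-less mode: allocate a contiguous range of Rx FQ ids from QMan */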
+ if (!fmc_q && !default_q) {
+ ret = qman_alloc_fqid_range(dev_rx_fqids, num_rx_fqs,
+ num_rx_fqs, 0);
+ if (ret < 0) {
+ DPAA_PMD_ERR("Failed to alloc rx fqid's\n");
+ goto free_rx;
+ }
+ }
+
for (loop = 0; loop < num_rx_fqs; loop++) {
if (default_q)
fqid = cfg->rx_def;
else
- fqid = DPAA_PCD_FQID_START + fman_intf->mac_idx *
- DPAA_PCD_FQID_MULTIPLIER + loop;
+ fqid = dev_rx_fqids[loop];
+
+ vsp_id = dev_vspids[loop];
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
fqid);
if (ret)
goto free_rx;
+ dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_rx_queues = num_rx_fqs;
dpaa_intf->nb_tx_queues = MAX_DPAA_CORES;
#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
- dpaa_debug_queue_init(&dpaa_intf->debug_queues[
- DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+ [DPAA_DEBUG_FQ_RX_ERROR], fman_intf->fqid_rx_err);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA RX ERROR queue init failed!");
+ goto free_tx;
+ }
dpaa_intf->debug_queues[DPAA_DEBUG_FQ_RX_ERROR].dpaa_intf = dpaa_intf;
- dpaa_debug_queue_init(&dpaa_intf->debug_queues[
- DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ ret = dpaa_debug_queue_init(&dpaa_intf->debug_queues
+ [DPAA_DEBUG_FQ_TX_ERROR], fman_intf->fqid_tx_err);
+ if (ret) {
+ DPAA_PMD_ERR("DPAA TX ERROR queue init failed!");
+ goto free_tx;
+ }
dpaa_intf->debug_queues[DPAA_DEBUG_FQ_TX_ERROR].dpaa_intf = dpaa_intf;
#endif
/* Populate ethdev structure */
eth_dev->dev_ops = &dpaa_devops;
+ eth_dev->rx_queue_count = dpaa_dev_rx_queue_count;
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
eth_dev->tx_pkt_burst = dpaa_eth_tx_drop_all;
/* copy the primary mac address */
rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
- rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);
-
- DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);
-
- /* Disable RX mode */
- fman_if_discard_rx_errors(fman_intf);
- fman_if_disable_rx(fman_intf);
- /* Disable promiscuous mode */
- fman_if_promiscuous_disable(fman_intf);
- /* Disable multicast */
- fman_if_reset_mcast_filter_table(fman_intf);
- /* Reset interface statistics */
- fman_if_stats_reset(fman_intf);
- /* Disable SG by default */
- fman_if_set_sg(fman_intf, 0);
- fman_if_set_maxfrm(fman_intf, RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+
+ RTE_LOG(INFO, PMD, "net: dpaa: %s: " RTE_ETHER_ADDR_PRT_FMT "\n",
+ dpaa_device->name,
+ fman_intf->mac_addr.addr_bytes[0],
+ fman_intf->mac_addr.addr_bytes[1],
+ fman_intf->mac_addr.addr_bytes[2],
+ fman_intf->mac_addr.addr_bytes[3],
+ fman_intf->mac_addr.addr_bytes[4],
+ fman_intf->mac_addr.addr_bytes[5]);
+
+ if (!fman_intf->is_shared_mac) {
+ /* Configure error packet handling */
+ fman_if_receive_rx_errors(fman_intf,
+ FM_FD_RX_STATUS_ERR_MASK);
+ /* Disable RX mode */
+ fman_if_disable_rx(fman_intf);
+ /* Disable promiscuous mode */
+ fman_if_promiscuous_disable(fman_intf);
+ /* Disable multicast */
+ fman_if_reset_mcast_filter_table(fman_intf);
+ /* Reset interface statistics */
+ fman_if_stats_reset(fman_intf);
+ /* Disable SG by default */
+ fman_if_set_sg(fman_intf, 0);
+ fman_if_set_maxfrm(fman_intf,
+ RTE_ETHER_MAX_LEN + VLAN_TAG_SIZE);
+ }
return 0;
}
static int
-dpaa_dev_uninit(struct rte_eth_dev *dev)
-{
- struct dpaa_if *dpaa_intf = dev->data->dev_private;
- int loop;
-
- PMD_INIT_FUNC_TRACE();
-
- if (rte_eal_process_type() != RTE_PROC_PRIMARY)
- return -EPERM;
-
- if (!dpaa_intf) {
- DPAA_PMD_WARN("Already closed or not started");
- return -1;
- }
-
- dpaa_eth_dev_close(dev);
-
- /* release configuration memory */
- if (dpaa_intf->fc_conf)
- rte_free(dpaa_intf->fc_conf);
-
- /* Release RX congestion Groups */
- if (dpaa_intf->cgr_rx) {
- for (loop = 0; loop < dpaa_intf->nb_rx_queues; loop++)
- qman_delete_cgr(&dpaa_intf->cgr_rx[loop]);
-
- qman_release_cgrid_range(dpaa_intf->cgr_rx[loop].cgrid,
- dpaa_intf->nb_rx_queues);
- }
-
- rte_free(dpaa_intf->cgr_rx);
- dpaa_intf->cgr_rx = NULL;
-
- /* Release TX congestion Groups */
- if (dpaa_intf->cgr_tx) {
- for (loop = 0; loop < MAX_DPAA_CORES; loop++)
- qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
-
- qman_release_cgrid_range(dpaa_intf->cgr_tx[loop].cgrid,
- MAX_DPAA_CORES);
- rte_free(dpaa_intf->cgr_tx);
- dpaa_intf->cgr_tx = NULL;
- }
-
- rte_free(dpaa_intf->rx_queues);
- dpaa_intf->rx_queues = NULL;
-
- rte_free(dpaa_intf->tx_queues);
- dpaa_intf->tx_queues = NULL;
-
- dev->dev_ops = NULL;
- dev->rx_pkt_burst = NULL;
- dev->tx_pkt_burst = NULL;
-
- return 0;
-}
-
-static int
-rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv __rte_unused,
+rte_dpaa_probe(struct rte_dpaa_driver *dpaa_drv,
struct rte_dpaa_device *dpaa_dev)
{
int diag;
default_q = 1;
}
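+ /* FMC-less mode: bring up the FM library once for this process */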
+ if (!(default_q || fmc_q)) {
+ if (dpaa_fm_init()) {
+ DPAA_PMD_ERR("FM init failed\n");
+ return -1;
+ }
+ }
+
/* disabling the default push mode for LS1043 */
if (dpaa_svr_family == SVR_LS1043A_FAMILY)
dpaa_push_mode_max_queue = 0;
if (dpaa_drv->drv_flags & RTE_DPAA_DRV_INTR_LSC)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
+
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
if (diag == 0) {
rte_dpaa_remove(struct rte_dpaa_device *dpaa_dev)
{
struct rte_eth_dev *eth_dev;
+ int ret;
PMD_INIT_FUNC_TRACE();
eth_dev = dpaa_dev->eth_dev;
- dpaa_dev_uninit(eth_dev);
- rte_eth_dev_release_port(eth_dev);
- return 0;
+ dpaa_eth_dev_close(eth_dev);
+ ret = rte_eth_dev_release_port(eth_dev);
+ return ret;
+}
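+
+/* Late destructor (priority 102): from the primary process, deconfigure the
+ * FM port and VSP state of any DPAA ports still registered, then terminate
+ * the FM library if it was initialized.
+ */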
+static void __attribute__((destructor(102))) dpaa_finish(void)
+{
+ /* For secondary, primary will do all the cleanup */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return;
+
+ if (!(default_q || fmc_q)) {
+ unsigned int i;
+
+ for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+ if (rte_eth_devices[i].dev_ops == &dpaa_devops) {
+ struct rte_eth_dev *dev = &rte_eth_devices[i];
+ struct dpaa_if *dpaa_intf =
+ dev->data->dev_private;
+ struct fman_if *fif =
+ dev->process_private;
+ if (dpaa_intf->port_handle &&
+ dpaa_fm_deconfig(dpaa_intf, fif))
+ DPAA_PMD_WARN("DPAA FM deconfig failed\n");
+ if (fif->num_profiles &&
+ dpaa_port_vsp_cleanup(dpaa_intf, fif))
+ DPAA_PMD_WARN("DPAA FM vsp cleanup failed\n");
+ }
+ }
+ if (is_global_init && dpaa_fm_term())
+ DPAA_PMD_WARN("DPAA FM term failed\n");
+
+ is_global_init = 0;
+
+ DPAA_PMD_INFO("DPAA fman cleaned up");
+ }
}
static struct rte_dpaa_driver rte_dpaa_pmd = {
};
RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
-RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);
+RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_pmd, NOTICE);