/* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright 2016 Freescale Semiconductor, Inc. All rights reserved.
- * Copyright 2017 NXP
+ * Copyright 2017-2020 NXP
*
*/
/* System headers */
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+ DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_RSS_HASH;
/* Supported Tx offloads */
-static uint64_t dev_tx_offloads_sup;
+static uint64_t dev_tx_offloads_sup =
+ DEV_TX_OFFLOAD_MT_LOCKFREE |
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* Tx offloads which cannot be disabled */
static uint64_t dev_tx_offloads_nodis =
DEV_TX_OFFLOAD_TCP_CKSUM |
DEV_TX_OFFLOAD_SCTP_CKSUM |
DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
- DEV_TX_OFFLOAD_MULTI_SEGS |
- DEV_TX_OFFLOAD_MT_LOCKFREE |
- DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+ DEV_TX_OFFLOAD_MULTI_SEGS;
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
static int dpaa_push_queue_idx; /* Queue index which is in push mode */
-/* Per FQ Taildrop in frame count */
+/* Per RX FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
+/* Per TX FQ Taildrop in frame count, disabled by default */
+static unsigned int td_tx_threshold;
+
struct rte_dpaa_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
uint32_t offset;
static struct rte_dpaa_driver rte_dpaa_pmd;
-static void
+static int
dpaa_eth_dev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info);
static inline void
}
if (frame_size > RTE_ETHER_MAX_LEN)
- dev->data->dev_conf.rxmode.offloads &=
+ dev->data->dev_conf.rxmode.offloads |=
DEV_RX_OFFLOAD_JUMBO_FRAME;
else
dev->data->dev_conf.rxmode.offloads &=
PMD_INIT_FUNC_TRACE();
- /* Rx offloads validation */
+ /* Rx offloads which are enabled by default */
if (dev_rx_offloads_nodis & ~rx_offloads) {
- DPAA_PMD_WARN(
- "Rx offloads non configurable - requested 0x%" PRIx64
- " ignored 0x%" PRIx64,
- rx_offloads, dev_rx_offloads_nodis);
+ DPAA_PMD_INFO(
+ "Some Rx offloads are enabled by default - requested 0x%" PRIx64
+ ", always enabled 0x%" PRIx64,
+ rx_offloads, dev_rx_offloads_nodis);
}
- /* Tx offloads validation */
+ /* Tx offloads which are enabled by default */
if (dev_tx_offloads_nodis & ~tx_offloads) {
- DPAA_PMD_WARN(
- "Tx offloads non configurable - requested 0x%" PRIx64
- " ignored 0x%" PRIx64,
- tx_offloads, dev_tx_offloads_nodis);
+ DPAA_PMD_INFO(
+ "Some Tx offloads are enabled by default - requested 0x%" PRIx64
+ ", always enabled 0x%" PRIx64,
+ tx_offloads, dev_tx_offloads_nodis);
}
if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
PMD_INIT_FUNC_TRACE();
/* Change tx callback to the real one */
- dev->tx_pkt_burst = dpaa_eth_queue_tx;
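+ /* With Tx CGR-based tail drop enabled, use the congestion-aware
+ * (slower) Tx handler instead of the default one.
+ */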
+ if (dpaa_intf->cgr_tx)
+ dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
+ else
+ dev->tx_pkt_burst = dpaa_eth_queue_tx;
+
fman_if_enable_rx(dpaa_intf->fif);
return 0;
return 0;
}
-static void dpaa_eth_dev_info(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info)
+static int dpaa_eth_dev_info(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
- PMD_INIT_FUNC_TRACE();
+ DPAA_PMD_DEBUG(": %s", dpaa_intf->name);
dev_info->max_rx_queues = dpaa_intf->nb_rx_queues;
dev_info->max_tx_queues = dpaa_intf->nb_tx_queues;
dev_info->max_vmdq_pools = ETH_16_POOLS;
dev_info->flow_type_rss_offloads = DPAA_RSS_OFFLOAD_ALL;
- if (dpaa_intf->fif->mac_type == fman_mac_1g)
+ if (dpaa_intf->fif->mac_type == fman_mac_1g) {
dev_info->speed_capa = ETH_LINK_SPEED_1G;
- else if (dpaa_intf->fif->mac_type == fman_mac_10g)
- dev_info->speed_capa = (ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G);
- else
+ } else if (dpaa_intf->fif->mac_type == fman_mac_2_5g) {
+ dev_info->speed_capa = ETH_LINK_SPEED_1G
+ | ETH_LINK_SPEED_2_5G;
+ } else if (dpaa_intf->fif->mac_type == fman_mac_10g) {
+ dev_info->speed_capa = ETH_LINK_SPEED_1G
+ | ETH_LINK_SPEED_2_5G
+ | ETH_LINK_SPEED_10G;
+ } else {
DPAA_PMD_ERR("invalid link_speed: %s, %d",
dpaa_intf->name, dpaa_intf->fif->mac_type);
+ return -EINVAL;
+ }
dev_info->rx_offload_capa = dev_rx_offloads_sup |
dev_rx_offloads_nodis;
dev_tx_offloads_nodis;
dev_info->default_rxportconf.burst_size = DPAA_DEF_RX_BURST_SIZE;
dev_info->default_txportconf.burst_size = DPAA_DEF_TX_BURST_SIZE;
+ dev_info->default_rxportconf.nb_queues = 1;
+ dev_info->default_txportconf.nb_queues = 1;
+ dev_info->default_txportconf.ring_size = CGR_TX_CGR_THRESH;
+ dev_info->default_rxportconf.ring_size = CGR_RX_PERFQ_THRESH;
+
+ return 0;
}
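+
+ /*
+ * Illustrative, application side: since dev_info now returns int, a
+ * failure (e.g. the unknown MAC type above) propagates through the
+ * generic ethdev API, assuming the caller's port_id is valid:
+ *
+ *	struct rte_eth_dev_info info;
+ *
+ *	if (rte_eth_dev_info_get(port_id, &info) != 0)
+ *		rte_exit(EXIT_FAILURE, "cannot get dev info\n");
+ */
+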
static int dpaa_eth_link_update(struct rte_eth_dev *dev,
if (dpaa_intf->fif->mac_type == fman_mac_1g)
link->link_speed = ETH_SPEED_NUM_1G;
+ else if (dpaa_intf->fif->mac_type == fman_mac_2_5g)
+ link->link_speed = ETH_SPEED_NUM_2_5G;
else if (dpaa_intf->fif->mac_type == fman_mac_10g)
link->link_speed = ETH_SPEED_NUM_10G;
else
return 0;
}
-static void dpaa_eth_stats_reset(struct rte_eth_dev *dev)
+static int dpaa_eth_stats_reset(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_stats_reset(dpaa_intf->fif);
+
+ return 0;
}
static int
return limit;
}
-static void dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_enable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_enable(dpaa_intf->fif);
+
+ return 0;
}
-static void dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_promiscuous_disable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_promiscuous_disable(dpaa_intf->fif);
+
+ return 0;
}
-static void dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
+static int dpaa_eth_multicast_enable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_set_mcast_filter_table(dpaa_intf->fif);
+
+ return 0;
}
-static void dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
+static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
fman_if_reset_mcast_filter_table(dpaa_intf->fif);
+
+ return 0;
}
static
fman_if_get_sg_enable(dpaa_intf->fif),
dev->data->dev_conf.rxmode.max_rx_pkt_len);
/* checking if push mode only, no error check for now */
- if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ if (!rxq->is_static &&
+ dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ struct qman_portal *qp;
+ int q_fd;
+
dpaa_push_queue_idx++;
opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
}
rxq->is_static = true;
+
+ /* Allocate qman specific portals */
+ qp = fsl_qman_fq_portal_create(&q_fd);
+ if (!qp) {
+ DPAA_PMD_ERR("Unable to alloc fq portal");
+ return -1;
+ }
+ rxq->qp = qp;
+
+ /* Set up the device interrupt handler */
+ if (!dev->intr_handle) {
+ struct rte_dpaa_device *dpaa_dev;
+ struct rte_device *rdev = dev->device;
+
+ dpaa_dev = container_of(rdev, struct rte_dpaa_device,
+ device);
+ dev->intr_handle = &dpaa_dev->intr_handle;
+ dev->intr_handle->intr_vec = rte_zmalloc(NULL,
+ dpaa_push_mode_max_queue * sizeof(int), 0);
+ if (!dev->intr_handle->intr_vec) {
+ DPAA_PMD_ERR("intr_vec alloc failed");
+ return -ENOMEM;
+ }
+ dev->intr_handle->nb_efd = dpaa_push_mode_max_queue;
+ dev->intr_handle->max_intr = dpaa_push_mode_max_queue;
+ }
+
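+ /* Publish the portal fd as this queue's interrupt event fd so the
+ * generic Rx interrupt API (rte_eth_dev_rx_intr_*) can wait on it.
+ */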
+ dev->intr_handle->type = RTE_INTR_HANDLE_EXT;
+ dev->intr_handle->intr_vec[queue_idx] = queue_idx + 1;
+ dev->intr_handle->efds[queue_idx] = q_fd;
+ rxq->q_fd = q_fd;
}
rxq->bp_array = rte_dpaa_bpid_info;
dev->data->rx_queues[queue_idx] = rxq;
DPAA_PMD_INFO("Tx queue setup for queue index: %d fq_id (0x%x)",
queue_idx, dpaa_intf->tx_queues[queue_idx].fqid);
dev->data->tx_queues[queue_idx] = &dpaa_intf->tx_queues[queue_idx];
+
return 0;
}
PMD_INIT_FUNC_TRACE();
if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
- RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
- rx_queue_id, frm_cnt);
+ DPAA_PMD_DEBUG("RX frame count for q(%d) is %u",
+ rx_queue_id, frm_cnt);
}
return frm_cnt;
}
ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, index);
if (ret)
- RTE_LOG(ERR, PMD, "error: Adding the MAC ADDR failed:"
- " err = %d", ret);
+ DPAA_PMD_ERR("Adding the MAC ADDR failed: err = %d", ret);
return 0;
}
ret = fman_if_add_mac_addr(dpaa_intf->fif, addr->addr_bytes, 0);
if (ret)
- RTE_LOG(ERR, PMD, "error: Setting the MAC ADDR failed %d", ret);
+ DPAA_PMD_ERR("Setting the MAC ADDR failed %d", ret);
return ret;
}
+static int dpaa_dev_queue_intr_enable(struct rte_eth_dev *dev,
+ uint16_t queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+
+ if (!rxq->is_static)
+ return -EINVAL;
+
+ return qman_fq_portal_irqsource_add(rxq->qp, QM_PIRQ_DQRI);
+}
+
+static int dpaa_dev_queue_intr_disable(struct rte_eth_dev *dev,
+ uint16_t queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_id];
+ uint32_t temp;
+ ssize_t temp1;
+
+ if (!rxq->is_static)
+ return -EINVAL;
+
+ qman_fq_portal_irqsource_remove(rxq->qp, ~0);
+
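+ /* Drain any pending event on the queue fd so a fresh interrupt can
+ * be raised later.
+ */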
+ temp1 = read(rxq->q_fd, &temp, sizeof(temp));
+ if (temp1 != sizeof(temp))
+ DPAA_PMD_ERR("irq read error");
+
+ qman_fq_portal_thread_irq(rxq->qp);
+
+ return 0;
+}
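+
+/*
+ * Illustrative usage, application side (generic ethdev/epoll API; port_id,
+ * queue_id, pkts and nb_rx are assumed to be defined by the caller):
+ *
+ *	struct rte_epoll_event ev;
+ *
+ *	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
+ *				  RTE_INTR_EVENT_ADD, NULL);
+ *	rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ *	if (rte_epoll_wait(RTE_EPOLL_PER_THREAD, &ev, 1, -1) > 0) {
+ *		rte_eth_dev_rx_intr_disable(port_id, queue_id);
+ *		nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
+ *	}
+ */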
+
static struct eth_dev_ops dpaa_devops = {
.dev_configure = dpaa_eth_dev_configure,
.dev_start = dpaa_eth_dev_start,
.mac_addr_set = dpaa_dev_set_mac_addr,
.fw_version_get = dpaa_fw_version_get,
+
+ .rx_queue_intr_enable = dpaa_dev_queue_intr_enable,
+ .rx_queue_intr_disable = dpaa_dev_queue_intr_disable,
};
static bool
}
};
- PMD_INIT_FUNC_TRACE();
-
if (fqid) {
ret = qman_reserve_fqid(fqid);
if (ret) {
/* Initialise a Tx FQ */
static int dpaa_tx_queue_init(struct qman_fq *fq,
- struct fman_if *fman_intf)
+ struct fman_if *fman_intf,
+ struct qman_cgr *cgr_tx)
{
struct qm_mcc_initfq opts = {0};
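+ /* Congestion group template for optional Tx tail drop: frame-count
+ * mode with tail drop (cstd_en) enabled; the threshold itself is
+ * filled in below from td_tx_threshold.
+ */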
+ struct qm_mcc_initcgr cgr_opts = {
+ .we_mask = QM_CGR_WE_CS_THRES |
+ QM_CGR_WE_CSTD_EN |
+ QM_CGR_WE_MODE,
+ .cgr = {
+ .cstd_en = QM_CGR_EN,
+ .mode = QMAN_CGR_MODE_FRAME
+ }
+ };
int ret;
- PMD_INIT_FUNC_TRACE();
-
ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID |
QMAN_FQ_FLAG_TO_DCPORTAL, fq);
if (ret) {
opts.fqd.context_a.hi = 0x80000000 | fman_dealloc_bufs_mask_hi;
opts.fqd.context_a.lo = 0 | fman_dealloc_bufs_mask_lo;
DPAA_PMD_DEBUG("init tx fq %p, fqid 0x%x", fq, fq->fqid);
+
+ if (cgr_tx) {
+ /* Enable tail drop with cgr on this queue */
+ qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres,
+ td_tx_threshold, 0);
+ cgr_tx->cb = NULL;
+ ret = qman_create_cgr(cgr_tx, QMAN_CGR_FLAG_USE_INIT,
+ &cgr_opts);
+ if (ret) {
+ DPAA_PMD_WARN(
+ "tx taildrop init fail on tx fqid 0x%x(ret=%d)",
+ fq->fqid, ret);
+ goto without_cgr;
+ }
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = cgr_tx->cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ DPAA_PMD_DEBUG("Tx FQ tail drop enabled, threshold = %d\n",
+ td_tx_threshold);
+ }
+without_cgr:
ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
if (ret)
DPAA_PMD_ERR("init tx fqid 0x%x failed %d", fq->fqid, ret);
struct fman_if *fman_intf;
struct fman_if_bpool *bp, *tmp_bp;
uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
+ uint32_t cgrid_tx[MAX_DPAA_CORES];
+ char eth_buf[RTE_ETHER_ADDR_FMT_SIZE];
PMD_INIT_FUNC_TRACE();
eth_dev->dev_ops = &dpaa_devops;
/* Plugging of UCODE burst API not supported in Secondary */
eth_dev->rx_pkt_burst = dpaa_eth_queue_rx;
- eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
+ if (dpaa_intf->cgr_tx)
+ eth_dev->tx_pkt_burst = dpaa_eth_queue_tx_slow;
+ else
+ eth_dev->tx_pkt_burst = dpaa_eth_queue_tx;
#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
qman_set_fq_lookup_table(
dpaa_intf->rx_queues->qman_fq_lookup_table);
dpaa_device = DEV_TO_DPAA_DEVICE(eth_dev->device);
dev_id = dpaa_device->id.dev_id;
dpaa_intf = eth_dev->data->dev_private;
- cfg = &dpaa_netcfg->port_cfg[dev_id];
+ cfg = dpaa_get_eth_port_cfg(dev_id);
fman_intf = cfg->fman_if;
dpaa_intf->name = dpaa_device->name;
return -ENOMEM;
}
+ memset(cgrid, 0, sizeof(cgrid));
+ memset(cgrid_tx, 0, sizeof(cgrid_tx));
+
+ /* If DPAA_TX_TAILDROP_THRESHOLD is set in the environment, use that
+ * value; a value of 0 keeps Tx tail drop disabled.
+ */
+ if (getenv("DPAA_TX_TAILDROP_THRESHOLD")) {
+ td_tx_threshold = atoi(getenv("DPAA_TX_TAILDROP_THRESHOLD"));
+ DPAA_PMD_DEBUG("Tail drop threshold env configured: %u",
+ td_tx_threshold);
+ /* Fall back to the default threshold if an unreasonably large
+ * value is configured.
+ */
+ if (td_tx_threshold > UINT16_MAX)
+ td_tx_threshold = CGR_RX_PERFQ_THRESH;
+ }
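+ /* e.g. exporting DPAA_TX_TAILDROP_THRESHOLD=512 before starting the
+ * application enables Tx tail drop with a 512-frame congestion
+ * threshold on each Tx queue's congestion group.
+ */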
+
/* If congestion control is enabled globally*/
if (td_threshold) {
dpaa_intf->cgr_rx = rte_zmalloc(NULL,
goto free_rx;
}
+ /* If Tx congestion control is enabled */
+ if (td_tx_threshold) {
+ dpaa_intf->cgr_tx = rte_zmalloc(NULL,
+ sizeof(struct qman_cgr) * MAX_DPAA_CORES,
+ MAX_CACHELINE);
+ if (!dpaa_intf->cgr_tx) {
+ DPAA_PMD_ERR("Failed to alloc mem for cgr_tx\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
+
+ ret = qman_alloc_cgrid_range(&cgrid_tx[0], MAX_DPAA_CORES,
+ 1, 0);
+ if (ret != MAX_DPAA_CORES) {
+ DPAA_PMD_WARN("insufficient CGRIDs available");
+ ret = -EINVAL;
+ goto free_rx;
+ }
+ } else {
+ dpaa_intf->cgr_tx = NULL;
+ }
+
for (loop = 0; loop < MAX_DPAA_CORES; loop++) {
+ if (dpaa_intf->cgr_tx)
+ dpaa_intf->cgr_tx[loop].cgrid = cgrid_tx[loop];
+
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
- fman_intf);
+ fman_intf,
+ dpaa_intf->cgr_tx ? &dpaa_intf->cgr_tx[loop] : NULL);
if (ret)
goto free_tx;
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
/* copy the primary mac address */
rte_ether_addr_copy(&fman_intf->mac_addr, ð_dev->data->mac_addrs[0]);
- RTE_LOG(INFO, PMD, "net: dpaa: %s: %02x:%02x:%02x:%02x:%02x:%02x\n",
- dpaa_device->name,
- fman_intf->mac_addr.addr_bytes[0],
- fman_intf->mac_addr.addr_bytes[1],
- fman_intf->mac_addr.addr_bytes[2],
- fman_intf->mac_addr.addr_bytes[3],
- fman_intf->mac_addr.addr_bytes[4],
- fman_intf->mac_addr.addr_bytes[5]);
+ rte_ether_format_addr(eth_buf, sizeof(eth_buf), &fman_intf->mac_addr);
+ DPAA_PMD_INFO("net: dpaa: %s: %s", dpaa_device->name, eth_buf);
/* Disable RX mode */
fman_if_discard_rx_errors(fman_intf);
free_rx:
rte_free(dpaa_intf->cgr_rx);
+ rte_free(dpaa_intf->cgr_tx);
rte_free(dpaa_intf->rx_queues);
dpaa_intf->rx_queues = NULL;
dpaa_intf->nb_rx_queues = 0;
rte_free(dpaa_intf->cgr_rx);
dpaa_intf->cgr_rx = NULL;
+ /* Release TX congestion Groups */
+ if (dpaa_intf->cgr_tx) {
+ for (loop = 0; loop < MAX_DPAA_CORES; loop++)
+ qman_delete_cgr(&dpaa_intf->cgr_tx[loop]);
+
+ qman_release_cgrid_range(dpaa_intf->cgr_tx[0].cgrid,
+ MAX_DPAA_CORES);
+ rte_free(dpaa_intf->cgr_tx);
+ dpaa_intf->cgr_tx = NULL;
+ }
+
rte_free(dpaa_intf->rx_queues);
dpaa_intf->rx_queues = NULL;
PMD_INIT_FUNC_TRACE();
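+ /* FMan writes its hardware annotation area (parse results etc.) into
+ * the buffer headroom, so the mbuf headroom must be able to hold it.
+ */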
+ if ((DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE) >
+ RTE_PKTMBUF_HEADROOM) {
+ DPAA_PMD_ERR(
+ "RTE_PKTMBUF_HEADROOM(%d) shall be > DPAA Annotation req(%d)",
+ RTE_PKTMBUF_HEADROOM,
+ DPAA_MBUF_HW_ANNOTATION + DPAA_FD_PTA_SIZE);
+
+ return -1;
+ }
+
/* In case of secondary process, the device is already configured
* and no further action is required, except portal initialization
* and verifying secondary attachment to port name.
}
if (!is_global_init && (rte_eal_process_type() == RTE_PROC_PRIMARY)) {
- /* One time load of Qman/Bman drivers */
- ret = qman_global_init();
- if (ret) {
- DPAA_PMD_ERR("QMAN initialization failed: %d",
- ret);
- return ret;
- }
- ret = bman_global_init();
- if (ret) {
- DPAA_PMD_ERR("BMAN initialization failed: %d",
- ret);
- return ret;
- }
-
if (access("/tmp/fmc.bin", F_OK) == -1) {
- RTE_LOG(INFO, PMD,
- "* FMC not configured.Enabling default mode\n");
+ DPAA_PMD_INFO("* FMC not configured.Enabling default mode");
default_q = 1;
}
eth_dev->device = &dpaa_dev->device;
dpaa_dev->eth_dev = eth_dev;
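+
+ /* Register a callback to free the mbuf when QMan rejects an enqueue
+ * (ERN, enqueue rejection notification), e.g. when a congested Tx FQ
+ * tail-drops the frame.
+ */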
+ qman_ern_register_cb(dpaa_free_mbuf);
+
/* Invoke PMD device initialization function */
diag = dpaa_dev_init(eth_dev);
if (diag == 0) {
};
RTE_PMD_REGISTER_DPAA(net_dpaa, rte_dpaa_pmd);
+RTE_LOG_REGISTER(dpaa_logtype_pmd, pmd.net.dpaa, NOTICE);