#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_ring.h>
#include <dpaa_ethdev.h>
#include <dpaa_rxtx.h>
+#include <rte_pmd_dpaa.h>
#include <fsl_usd.h>
#include <fsl_qman.h>
/* Keep track of whether QMAN and BMAN have been globally initialized */
static int is_global_init;
+/* At present we only allow up to 4 push mode queues - as each of these
+ * queues needs a dedicated portal and we are short of portals.
+ */
+#define DPAA_MAX_PUSH_MODE_QUEUE 4
+
+static int dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+static int dpaa_push_queue_idx; /* Index of the next queue to be configured in push mode */
+
/* Per FQ Taildrop in frame count */
static unsigned int td_threshold = CGR_RX_PERFQ_THRESH;
offsetof(struct dpaa_if_stats, tund)},
};
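+/* Forward declaration of the DPAA PMD driver, referenced by is_dpaa_supported() */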
+static struct rte_dpaa_driver rte_dpaa_pmd;
+
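+/* Default FQ configuration used when an Rx queue is serviced in poll mode */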
+static inline void
+dpaa_poll_queue_default_config(struct qm_mcc_initfq *opts)
+{
+ memset(opts, 0, sizeof(struct qm_mcc_initfq));
+ opts->we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts->fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts->fqd.context_a.stashing.exclusive = 0;
+ if (dpaa_svr_family != SVR_LS1046A_FAMILY)
+ opts->fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts->fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts->fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+}
+
static int
dpaa_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
{
struct dpaa_if *dpaa_intf = dev->data->dev_private;
struct qman_fq *rxq = &dpaa_intf->rx_queues[queue_idx];
+ struct qm_mcc_initfq opts = {0};
+ u32 flags = 0;
+ int ret;
PMD_INIT_FUNC_TRACE();
dpaa_intf->name, fd_offset,
fman_if_get_fdoff(dpaa_intf->fif));
}
-
+ /* Use push mode if push queues are still available; no error check for now */
+ if (dpaa_push_mode_max_queue > dpaa_push_queue_idx) {
+ dpaa_push_queue_idx++;
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK |
+ QM_FQCTRL_CTXASTASHING |
+ QM_FQCTRL_PREFERINCACHE;
+ opts.fqd.context_a.stashing.exclusive = 0;
+ opts.fqd.context_a.stashing.annotation_cl =
+ DPAA_IF_RX_ANNOTATION_STASH;
+ opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
+ opts.fqd.context_a.stashing.context_cl =
+ DPAA_IF_RX_CONTEXT_STASH;
+
+ /* Create a channel and associate the given queue with the channel */
+ qman_alloc_pool_range((u32 *)&rxq->ch_id, 1, 1, 0);
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = rxq->ch_id;
+ opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ /* Configure tail drop */
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[queue_idx].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret)
+ DPAA_PMD_ERR("Channel/Queue association failed. fqid %d"
+ " ret: %d", rxq->fqid, ret);
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb;
+ rxq->is_static = true;
+ }
dev->data->rx_queues[queue_idx] = rxq;
/* configure the CGR size as per the desc size */
if (dpaa_intf->cgr_rx) {
struct qm_mcc_initcgr cgr_opts = {0};
- int ret;
/* Enable tail drop with cgr on this queue */
qm_cgr_cs_thres_set64(&cgr_opts.cgr.cs_thres, nb_desc, 0);
return 0;
}
+int dpaa_eth_eventq_attach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id,
+ u16 ch_id,
+ const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+ struct qm_mcc_initfq opts = {0};
+
+ if (dpaa_push_mode_max_queue)
+ DPAA_PMD_WARN("PUSH mode is already enabled for the first %d queues.\n"
+ "To disable it, set DPAA_PUSH_QUEUES_NUMBER to 0\n",
+ dpaa_push_mode_max_queue);
+
+ dpaa_poll_queue_default_config(&opts);
+
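+ /* Map the eventdev scheduling type onto the FQ configuration */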
+ switch (queue_conf->ev.sched_type) {
+ case RTE_SCHED_TYPE_ATOMIC:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_HOLDACTIVE;
+ /* Reset the FQCTRL_AVOIDBLOCK bit as it is an unnecessary
+ * configuration when HOLD_ACTIVE is set
+ */
+ opts.fqd.fq_ctrl &= (~QM_FQCTRL_AVOIDBLOCK);
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_atomic;
+ break;
+ case RTE_SCHED_TYPE_ORDERED:
+ DPAA_PMD_ERR("Ordered queue schedule type is not supported\n");
+ return -1;
+ default:
+ opts.fqd.fq_ctrl |= QM_FQCTRL_AVOIDBLOCK;
+ rxq->cb.dqrr_dpdk_cb = dpaa_rx_cb_parallel;
+ break;
+ }
+
+ opts.we_mask = opts.we_mask | QM_INITFQ_WE_DESTWQ;
+ opts.fqd.dest.channel = ch_id;
+ opts.fqd.dest.wq = queue_conf->ev.priority;
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
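+ /* Initialize the FQ in scheduled (rather than parked) state */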
+ flags = QMAN_INITFQ_FLAG_SCHED;
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("Channel/Queue association failed. fqid %d ret:%d",
+ rxq->fqid, ret);
+ return ret;
+ }
+
+ /* Copy the event configuration; it is filled into events during dequeue */
+ memcpy(&rxq->ev, &queue_conf->ev, sizeof(struct rte_event));
+ dev->data->rx_queues[eth_rx_queue_id] = rxq;
+
+ return ret;
+}
+
+int dpaa_eth_eventq_detach(const struct rte_eth_dev *dev,
+ int eth_rx_queue_id)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+ u32 flags = 0;
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[eth_rx_queue_id];
+
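+ /* Restore the default poll mode configuration on this Rx queue */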
+ dpaa_poll_queue_default_config(&opts);
+
+ if (dpaa_intf->cgr_rx) {
+ opts.we_mask |= QM_INITFQ_WE_CGID;
+ opts.fqd.cgid = dpaa_intf->cgr_rx[eth_rx_queue_id].cgrid;
+ opts.fqd.fq_ctrl |= QM_FQCTRL_CGE;
+ }
+
+ ret = qman_init_fq(rxq, flags, &opts);
+ if (ret) {
+ DPAA_PMD_ERR("init rx fqid %d failed with ret: %d",
+ rxq->fqid, ret);
+ }
+
+ rxq->cb.dqrr_dpdk_cb = NULL;
+ dev->data->rx_queues[eth_rx_queue_id] = NULL;
+
+ return 0;
+}
+
static
void dpaa_eth_rx_queue_release(void *rxq __rte_unused)
{
PMD_INIT_FUNC_TRACE();
}
+static uint32_t
+dpaa_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct qman_fq *rxq = &dpaa_intf->rx_queues[rx_queue_id];
+ u32 frm_cnt = 0;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (qman_query_fq_frm_cnt(rxq, &frm_cnt) == 0) {
+ RTE_LOG(DEBUG, PMD, "RX frame count for q(%d) is %u\n",
+ rx_queue_id, frm_cnt);
+ }
+ return frm_cnt;
+}
+
static int dpaa_link_down(struct rte_eth_dev *dev)
{
PMD_INIT_FUNC_TRACE();
.tx_queue_setup = dpaa_eth_tx_queue_setup,
.rx_queue_release = dpaa_eth_rx_queue_release,
.tx_queue_release = dpaa_eth_tx_queue_release,
+ .rx_queue_count = dpaa_dev_rx_queue_count,
.flow_ctrl_get = dpaa_flow_ctrl_get,
.flow_ctrl_set = dpaa_flow_ctrl_set,
.fw_version_get = dpaa_fw_version_get,
};
+static bool
+is_device_supported(struct rte_eth_dev *dev, struct rte_dpaa_driver *drv)
+{
+ if (strcmp(dev->device->driver->name,
+ drv->driver.name))
+ return false;
+
+ return true;
+}
+
+static bool
+is_dpaa_supported(struct rte_eth_dev *dev)
+{
+ return is_device_supported(dev, &rte_dpaa_pmd);
+}
+
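+/* PMD-specific API: enable or disable TX loopback on the FMan interface
+ * backing the given port.
+ */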
+int
+rte_pmd_dpaa_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct dpaa_if *dpaa_intf;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (!is_dpaa_supported(dev))
+ return -ENOTSUP;
+
+ dpaa_intf = dev->data->dev_private;
+
+ if (on)
+ fman_if_loopback_enable(dpaa_intf->fif);
+ else
+ fman_if_loopback_disable(dpaa_intf->fif);
+
+ return 0;
+}
+
static int dpaa_fc_set_default(struct dpaa_if *dpaa_intf)
{
struct rte_eth_fc_conf *fc_conf;
fqid, ret);
return ret;
}
+ fq->is_static = false;
- opts.we_mask = QM_INITFQ_WE_DESTWQ | QM_INITFQ_WE_FQCTRL |
- QM_INITFQ_WE_CONTEXTA;
-
- opts.fqd.dest.wq = DPAA_IF_RX_PRIORITY;
- opts.fqd.fq_ctrl = QM_FQCTRL_AVOIDBLOCK | QM_FQCTRL_CTXASTASHING |
- QM_FQCTRL_PREFERINCACHE;
- opts.fqd.context_a.stashing.exclusive = 0;
- opts.fqd.context_a.stashing.annotation_cl = DPAA_IF_RX_ANNOTATION_STASH;
- opts.fqd.context_a.stashing.data_cl = DPAA_IF_RX_DATA_STASH;
- opts.fqd.context_a.stashing.context_cl = DPAA_IF_RX_CONTEXT_STASH;
+ dpaa_poll_queue_default_config(&opts);
if (cgr_rx) {
/* Enable tail drop with cgr on this queue */
else
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
+ /* Check whether push mode queues are to be enabled. Currently we allow
+ * only one queue per thread.
+ */
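+ /* The requested value is capped at DPAA_MAX_PUSH_MODE_QUEUE */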
+ if (getenv("DPAA_PUSH_QUEUES_NUMBER")) {
+ dpaa_push_mode_max_queue =
+ atoi(getenv("DPAA_PUSH_QUEUES_NUMBER"));
+ if (dpaa_push_mode_max_queue > DPAA_MAX_PUSH_MODE_QUEUE)
+ dpaa_push_mode_max_queue = DPAA_MAX_PUSH_MODE_QUEUE;
+ }
+
/* Each device can not have more than DPAA_PCD_FQID_MULTIPLIER RX
* queues.
*/
dpaa_intf->rx_queues = rte_zmalloc(NULL,
sizeof(struct qman_fq) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->rx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for RX queues\n");
+ return -ENOMEM;
+ }
/* If congestion control is enabled globally*/
if (td_threshold) {
dpaa_intf->cgr_rx = rte_zmalloc(NULL,
sizeof(struct qman_cgr) * num_rx_fqs, MAX_CACHELINE);
+ if (!dpaa_intf->cgr_rx) {
+ DPAA_PMD_ERR("Failed to alloc mem for cgr_rx\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
ret = qman_alloc_cgrid_range(&cgrid[0], num_rx_fqs, 1, 0);
if (ret != num_rx_fqs) {
DPAA_PMD_WARN("insufficient CGRIDs available");
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_rx;
}
} else {
dpaa_intf->cgr_rx = NULL;
dpaa_intf->cgr_rx ? &dpaa_intf->cgr_rx[loop] : NULL,
fqid);
if (ret)
- return ret;
+ goto free_rx;
dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_rx_queues = num_rx_fqs;
/* Initialise Tx FQs. Have as many Tx FQ's as number of cores */
num_cores = rte_lcore_count();
dpaa_intf->tx_queues = rte_zmalloc(NULL, sizeof(struct qman_fq) *
num_cores, MAX_CACHELINE);
- if (!dpaa_intf->tx_queues)
- return -ENOMEM;
+ if (!dpaa_intf->tx_queues) {
+ DPAA_PMD_ERR("Failed to alloc mem for TX queues\n");
+ ret = -ENOMEM;
+ goto free_rx;
+ }
for (loop = 0; loop < num_cores; loop++) {
ret = dpaa_tx_queue_init(&dpaa_intf->tx_queues[loop],
fman_intf);
if (ret)
- return ret;
+ goto free_tx;
dpaa_intf->tx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_tx_queues = num_cores;
DPAA_PMD_ERR("Failed to allocate %d bytes needed to "
"store MAC addresses",
ETHER_ADDR_LEN * DPAA_MAX_MAC_FILTER);
- rte_free(dpaa_intf->cgr_rx);
- rte_free(dpaa_intf->rx_queues);
- rte_free(dpaa_intf->tx_queues);
- dpaa_intf->rx_queues = NULL;
- dpaa_intf->tx_queues = NULL;
- dpaa_intf->nb_rx_queues = 0;
- dpaa_intf->nb_tx_queues = 0;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto free_tx;
}
/* copy the primary mac address */
fman_if_stats_reset(fman_intf);
return 0;
+
+free_tx:
+ rte_free(dpaa_intf->tx_queues);
+ dpaa_intf->tx_queues = NULL;
+ dpaa_intf->nb_tx_queues = 0;
+
+free_rx:
+ rte_free(dpaa_intf->cgr_rx);
+ rte_free(dpaa_intf->rx_queues);
+ dpaa_intf->rx_queues = NULL;
+ dpaa_intf->nb_rx_queues = 0;
+ return ret;
}
static int