+ q_storage->active_dqs = dq_storage1;
+ q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+ set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+
+ return num_rx;
+}
+
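+ /*
+ * Dequeue completed jobs from the DPDMAI Rx queue without dequeue-storage
+ * prefetching: a fresh VDQ pull command is issued on every call and its
+ * result storage is polled until QBMAN reports completion.
+ */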
+static int
+dpdmai_dev_dequeue_multijob_no_prefetch(
+ struct qdma_virt_queue *qdma_vq,
+ uint16_t *vq_id,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ uint16_t rxq_id = qdma_pq->queue_id;
+
+ struct dpaa2_queue *rxq;
+ struct qbman_result *dq_storage;
+ struct qbman_pull_desc pulldesc;
+ struct qbman_swp *swp;
+ uint32_t fqid;
+ uint8_t status, pending;
+ uint8_t num_rx = 0;
+ const struct qbman_fd *fd;
+ uint16_t vqid, num_rx_ret;
+ int ret, next_pull, num_pulled = 0;
+
+ if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
+ /* Make sure there is enough space to receive all the SG jobs. */
+ if (unlikely(nb_jobs < DPAA2_QDMA_MAX_SG_NB))
+ return -EINVAL;
+ nb_jobs = 1;
+ }
+
+ next_pull = nb_jobs;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ rxq = &(dpdmai_dev->rx_queue[rxq_id]);
+ fqid = rxq->fqid;
+
+ do {
+ dq_storage = rxq->q_storage->dq_storage[0];
+ /* Prepare dequeue descriptor */
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (uint64_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ if (next_pull > dpaa2_dqrr_size) {
+ qbman_pull_desc_set_numframes(&pulldesc,
+ dpaa2_dqrr_size);
+ next_pull -= dpaa2_dqrr_size;
+ } else {
+ qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+ next_pull = 0;
+ }
+
+ while (1) {
+ if (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_QDMA_DP_WARN(
+ "VDQ command not issued. QBMAN busy");
+ /* Portal was busy, try again */
+ continue;
+ }
+ break;
+ }
+
+ rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+ /* Check if the previously issued command has completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+
+ do {
+ /* Loop until dq_storage is updated
+ * with a new token by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ vqid = qdma_vq->get_job(qdma_vq, fd,
+ &job[num_rx], &num_rx_ret);
+ if (vq_id)
+ vq_id[num_rx] = vqid;
+
+ dq_storage++;
+ num_rx += num_rx_ret;
+ num_pulled++;
+
+ } while (pending);
+ /* The last VDQ was completely filled and more frames were requested */
+ } while (next_pull && num_pulled == dpaa2_dqrr_size);
+
+ return num_rx;
+}
+
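+ /*
+ * Enqueue up to nb_jobs jobs on the DPDMAI Tx queue. In SG-format mode the
+ * jobs are packed into scatter/gather FDs (up to DPAA2_QDMA_MAX_SG_NB jobs
+ * per FD); otherwise one FD is built per job. Returns the number of jobs
+ * accepted by the hardware.
+ */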
+static int
+dpdmai_dev_enqueue_multi(
+ struct qdma_virt_queue *qdma_vq,
+ struct rte_qdma_job **job,
+ uint16_t nb_jobs)
+{
+ struct qdma_hw_queue *qdma_pq = qdma_vq->hw_queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = qdma_pq->dpdmai_dev;
+ uint16_t txq_id = qdma_pq->queue_id;
+
+ struct qbman_fd fd[RTE_QDMA_BURST_NB_MAX];
+ struct dpaa2_queue *txq;
+ struct qbman_eq_desc eqdesc;
+ struct qbman_swp *swp;
+ int ret;
+ uint32_t num_to_send = 0;
+ uint16_t num_tx = 0;
+ uint32_t enqueue_loop, retry_count, loop;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_QDMA_ERR(
+ "Failed to allocate IO portal, tid: %d\n",
+ rte_gettid());
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
+ txq = &(dpdmai_dev->tx_queue[txq_id]);
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc);
+ qbman_eq_desc_set_fq(&eqdesc, txq->fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc, 0);
+ qbman_eq_desc_set_response(&eqdesc, 0, 0);
+
+ if (qdma_vq->flags & RTE_QDMA_VQ_FD_SG_FORMAT) {
+ uint16_t fd_nb;
+ uint16_t sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
+ DPAA2_QDMA_MAX_SG_NB : nb_jobs;
+ uint16_t job_idx = 0;
+ uint16_t fd_sg_nb[8];
+ uint16_t nb_jobs_ret = 0;
+
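+ /* One SG FD carries up to DPAA2_QDMA_MAX_SG_NB jobs: round up for the FD count. */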
+ if (nb_jobs % DPAA2_QDMA_MAX_SG_NB)
+ fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB + 1;
+ else
+ fd_nb = nb_jobs / DPAA2_QDMA_MAX_SG_NB;
+
+ memset(&fd[0], 0, sizeof(struct qbman_fd) * fd_nb);
+
+ for (loop = 0; loop < fd_nb; loop++) {
+ ret = qdma_vq->set_fd(qdma_vq, &fd[loop], &job[job_idx],
+ sg_entry_nb);
+ if (unlikely(ret < 0))
+ return 0;
+ fd_sg_nb[loop] = sg_entry_nb;
+ nb_jobs -= sg_entry_nb;
+ job_idx += sg_entry_nb;
+ sg_entry_nb = nb_jobs > DPAA2_QDMA_MAX_SG_NB ?
+ DPAA2_QDMA_MAX_SG_NB : nb_jobs;
+ }
+
+ /* Enqueue the FDs to QBMAN */
+ enqueue_loop = 0; retry_count = 0;
+
+ while (enqueue_loop < fd_nb) {
+ ret = qbman_swp_enqueue_multiple(swp,
+ &eqdesc, &fd[enqueue_loop],
+ NULL, fd_nb - enqueue_loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ return nb_jobs_ret;
+ } else {
+ for (loop = 0; loop < (uint32_t)ret; loop++)
+ nb_jobs_ret +=
+ fd_sg_nb[enqueue_loop + loop];
+ enqueue_loop += ret;
+ retry_count = 0;
+ }
+ }
+
+ return nb_jobs_ret;
+ }
+
+ memset(fd, 0, nb_jobs * sizeof(struct qbman_fd));
+
+ while (nb_jobs > 0) {
+ num_to_send = (nb_jobs > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_jobs;
+
+ ret = qdma_vq->set_fd(qdma_vq, &fd[num_tx],
+ &job[num_tx], num_to_send);
+ if (unlikely(ret < 0))
+ break;
+
+ /* Enqueue the FDs to QBMAN */
+ enqueue_loop = 0; retry_count = 0;
+ loop = num_to_send;
+
+ while (enqueue_loop < loop) {
+ ret = qbman_swp_enqueue_multiple(swp,
+ &eqdesc,
+ &fd[num_tx + enqueue_loop],
+ NULL,
+ loop - enqueue_loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+ return num_tx;
+ } else {
+ enqueue_loop += ret;
+ retry_count = 0;
+ }
+ }
+ num_tx += num_to_send;
+ nb_jobs -= loop;
+ }
+ return num_tx;
+}
+
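+ /* Reserve the first unused HW queue from the global list and bind it to lcore_id. */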
+static struct qdma_hw_queue *
+alloc_hw_queue(uint32_t lcore_id)
+{
+ struct qdma_hw_queue *queue = NULL;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* Get a free queue from the list */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next) {
+ if (queue->num_users == 0) {
+ queue->lcore_id = lcore_id;
+ queue->num_users++;
+ break;
+ }
+ }
+
+ return queue;
+}
+
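+ /* Release the HW queue by dropping its user reference. */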
+static void
+free_hw_queue(struct qdma_hw_queue *queue)
+{
+ DPAA2_QDMA_FUNC_TRACE();
+
+ queue->num_users--;
+}
+
+
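+ /*
+ * Get a HW queue for the given lcore: allocate a new queue while the core is
+ * below its per-core limit, otherwise share the least loaded queue already
+ * assigned to the core and bump its user count.
+ */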
+static struct qdma_hw_queue *
+get_hw_queue(struct qdma_device *qdma_dev, uint32_t lcore_id)
+{
+ struct qdma_per_core_info *core_info;
+ struct qdma_hw_queue *queue, *temp;
+ uint32_t least_num_users;
+ int num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+
+ /*
+ * Allocate a HW queue if the core holds fewer queues
+ * than the configured per-core maximum
+ */
+ if (num_hw_queues < qdma_dev->max_hw_queues_per_core) {
+ queue = alloc_hw_queue(lcore_id);
+ if (queue) {
+ core_info->hw_queues[num_hw_queues] = queue;
+ core_info->num_hw_queues++;
+ return queue;
+ }
+ }
+
+ queue = core_info->hw_queues[0];
+ /* In case there is no queue associated with the core, return NULL */
+ if (!queue)
+ return NULL;
+
+ /* Fetch the least loaded H/W queue */
+ least_num_users = core_info->hw_queues[0]->num_users;
+ for (i = 0; i < num_hw_queues; i++) {
+ temp = core_info->hw_queues[i];
+ if (temp->num_users < least_num_users) {
+ least_num_users = temp->num_users;
+ queue = temp;
+ }
+ }
+
+ if (queue)
+ queue->num_users++;
+
+ return queue;
+}
+
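+ /*
+ * Drop a user reference on a HW queue. When the last user releases it, the
+ * queue is freed and removed from the owning core's queue table.
+ */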
+static void
+put_hw_queue(struct qdma_hw_queue *queue)
+{
+ struct qdma_per_core_info *core_info;
+ int lcore_id, num_hw_queues, i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /*
+ * If this is the last user of the queue, free it and
+ * also remove it from the QDMA core info.
+ */
+ if (queue->num_users == 1) {
+ free_hw_queue(queue);
+
+ /* Remove the physical queue from core info */
+ lcore_id = queue->lcore_id;
+ core_info = &qdma_core_info[lcore_id];
+ num_hw_queues = core_info->num_hw_queues;
+ for (i = 0; i < num_hw_queues; i++) {
+ if (queue == core_info->hw_queues[i])
+ break;
+ }
+ for (; i < num_hw_queues - 1; i++)
+ core_info->hw_queues[i] = core_info->hw_queues[i + 1];
+ core_info->hw_queues[i] = NULL;
+ } else {
+ queue->num_users--;
+ }
+}
+
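+ /* Report device attributes; only the number of HW queues is returned. */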
+static int
+dpaa2_qdma_attr_get(struct rte_rawdev *rawdev,
+ __rte_unused const char *attr_name,
+ uint64_t *attr_value)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ struct rte_qdma_attr *qdma_attr = (struct rte_qdma_attr *)attr_value;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_attr->num_hw_queues = qdma_dev->num_hw_queues;
+
+ return 0;
+}
+
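+ /*
+ * Reset the QDMA device: refuse while the device is running or jobs are still
+ * pending, then release all HW queues, free the virtual queues and their
+ * status rings, and clear the per-core and device state.
+ */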
+static int
+dpaa2_qdma_reset(struct rte_rawdev *rawdev)
+{
+ struct qdma_hw_queue *queue;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+ int i;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev->state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before reset.");
+ return -EBUSY;
+ }
+
+ /* In case there are pending jobs on any VQ, return -EBUSY */
+ for (i = 0; i < qdma_dev->max_vqs; i++) {
+ if (qdma_dev->vqs[i].in_use && (qdma_dev->vqs[i].num_enqueues !=
+ qdma_dev->vqs[i].num_dequeues)) {
+ DPAA2_QDMA_ERR("Jobs are still pending on VQ: %d", i);
+ return -EBUSY;
+ }
+ }
+
+ /* Reset HW queues */
+ TAILQ_FOREACH(queue, &qdma_queue_list, next)
+ queue->num_users = 0;
+
+ /* Reset and free virtual queues */
+ for (i = 0; i < qdma_dev->max_vqs; i++) {
+ if (qdma_dev->vqs[i].status_ring)
+ rte_ring_free(qdma_dev->vqs[i].status_ring);
+ }
+ rte_free(qdma_dev->vqs);
+ qdma_dev->vqs = NULL;
+
+ /* Reset per core info */
+ memset(&qdma_core_info, 0,
+ sizeof(struct qdma_per_core_info) * RTE_MAX_LCORE);
+
+ /* Reset QDMA device structure */
+ qdma_dev->max_hw_queues_per_core = 0;
+ qdma_dev->fle_queue_pool_cnt = 0;
+ qdma_dev->max_vqs = 0;
+
+ return 0;
+}
+
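+ /*
+ * Configure the QDMA device (allowed only in stopped state): record the
+ * per-core HW queue limit, allocate the virtual queue array, and store the
+ * FLE pool size.
+ */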
+static int
+dpaa2_qdma_configure(const struct rte_rawdev *rawdev,
+ rte_rawdev_obj_t config,
+ size_t config_size)
+{
+ char name[32]; /* RTE_MEMZONE_NAMESIZE = 32 */
+ struct rte_qdma_config *qdma_config = (struct rte_qdma_config *)config;
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ if (config_size != sizeof(*qdma_config))
+ return -EINVAL;
+
+ /* In case QDMA device is not in stopped state, return -EBUSY */
+ if (qdma_dev->state == 1) {
+ DPAA2_QDMA_ERR(
+ "Device is in running state. Stop before config.");
+ return -EBUSY;
+ }
+
+ /* Set max HW queues per core */
+ if (qdma_config->max_hw_queues_per_core > MAX_HW_QUEUE_PER_CORE) {
+ DPAA2_QDMA_ERR("H/W queues per core is more than: %d",
+ MAX_HW_QUEUE_PER_CORE);
+ return -EINVAL;
+ }
+ qdma_dev->max_hw_queues_per_core =
+ qdma_config->max_hw_queues_per_core;
+
+ /* Allocate Virtual Queues */
+ snprintf(name, sizeof(name), "qdma_%d_vq", rawdev->dev_id);
+ qdma_dev->vqs = rte_malloc(name,
+ (sizeof(struct qdma_virt_queue) * qdma_config->max_vqs),
+ RTE_CACHE_LINE_SIZE);
+ if (!qdma_dev->vqs) {
+ DPAA2_QDMA_ERR("qdma_virtual_queues allocation failed");
+ return -ENOMEM;
+ }
+ qdma_dev->max_vqs = qdma_config->max_vqs;
+ qdma_dev->fle_queue_pool_cnt = qdma_config->fle_queue_pool_cnt;
+
+ return 0;
+}
+
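+ /* Move the QDMA device to the running state. */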
+static int
+dpaa2_qdma_start(struct rte_rawdev *rawdev)
+{
+ struct dpaa2_dpdmai_dev *dpdmai_dev = rawdev->dev_private;
+ struct qdma_device *qdma_dev = dpdmai_dev->qdma_dev;
+
+ DPAA2_QDMA_FUNC_TRACE();
+
+ qdma_dev->state = 1;
+
+ return 0;
+}
+
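+ /* kvargs handler: accept only the value "1" for a device argument. */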
+static int
+check_devargs_handler(__rte_unused const char *key, const char *value,
+ __rte_unused void *opaque)
+{
+ if (strcmp(value, "1"))
+ return -1;
+
+ return 0;
+}
+
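+ /* Check whether the given devargs key is present and set to "1". */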
+static int
+dpaa2_get_devargs(struct rte_devargs *devargs, const char *key)
+{
+ struct rte_kvargs *kvlist;
+
+ if (!devargs)
+ return 0;
+
+ kvlist = rte_kvargs_parse(devargs->args, NULL);
+ if (!kvlist)
+ return 0;
+
+ if (!rte_kvargs_count(kvlist, key)) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+
+ if (rte_kvargs_process(kvlist, key,
+ check_devargs_handler, NULL) < 0) {
+ rte_kvargs_free(kvlist);
+ return 0;
+ }
+ rte_kvargs_free(kvlist);