+ struct dpaa2_port *dpaa2_portal = port;
+ struct dpaa2_dpio_dev *dpio_dev;
+ uint32_t queue_id = ev[0].queue_id;
+ struct dpaa2_eventq *evq_info;
+ uint32_t fqid, retry_count;
+ struct qbman_swp *swp;
+ struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+ uint32_t loop, frames_to_send;
+ struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+ uint16_t num_tx = 0;
+ int i, n, ret;
+ uint8_t channel_index;
+
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ /* Affine current thread context to a qman portal */
+ ret = dpaa2_affine_qbman_swp();
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ /* TODO: dpaa2_portal should hold dpio_dev instead of using a per-thread variable */
+ dpio_dev = DPAA2_PER_LCORE_DPIO;
+ swp = DPAA2_PER_LCORE_PORTAL;
+
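+ /* Channels already mapped to this portal; skip static dequeue setup */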
+ if (likely(dpaa2_portal->is_port_linked))
+ goto skip_linking;
+
+ /* Create mapping between portal and channel to receive packets */
+ for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
+ evq_info = &dpaa2_portal->evq_info[i];
+ if (!evq_info->event_port)
+ continue;
+
+ ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
+ CMD_PRI_LOW,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id,
+ &channel_index);
+ if (ret < 0) {
+ DPAA2_EVENTDEV_ERR(
+ "Static dequeue config failed: err(%d)", ret);
+ goto err;
+ }
+
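+ /* Enable push-mode dequeues from this channel into the portal */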
+ qbman_swp_push_set(swp, channel_index, 1);
+ evq_info->dpcon->channel_index = channel_index;
+ }
+ dpaa2_portal->is_port_linked = true;
+
+skip_linking:
+ evq_info = &dpaa2_portal->evq_info[queue_id];
+
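+ /* Send in batches of at most one EQCR ring's worth of descriptors */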
+ while (nb_events) {
+ frames_to_send = (nb_events > dpaa2_eqcr_size) ?
+ dpaa2_eqcr_size : nb_events;
+
+ for (loop = 0; loop < frames_to_send; loop++) {
+ const struct rte_event *event = &ev[num_tx + loop];
+
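+ /* Pick the DPCI Rx FQ by schedule type: parallel and atomic events use separate queues */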
+ if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
+ else
+ fqid = evq_info->dpci->rx_queue[
+ DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;
+
+ /* Prepare enqueue descriptor */
+ qbman_eq_desc_clear(&eqdesc[loop]);
+ qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
+ qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
+ qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);
+
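+ /*
+ * Atomic events held in a DQRR entry carry the entry index in
+ * mbuf->seqn; request that HW consume that DQRR entry on enqueue
+ * (DCA) and drop the local hold on it.
+ */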
+ if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
+ && event->mbuf->seqn) {
+ uint8_t dqrr_index = event->mbuf->seqn - 1;
+
+ qbman_eq_desc_set_dca(&eqdesc[loop], 1,
+ dqrr_index, 0);
+ DPAA2_PER_LCORE_DQRR_SIZE--;
+ DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
+ }
+
+ memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));
+
+ /*
+ * TODO: align with the HW context data to avoid this per-event copy
+ */
+ struct rte_event *ev_temp = rte_malloc(NULL,
+ sizeof(struct rte_event), 0);
+
+ if (!ev_temp) {
+ DPAA2_EVENTDEV_ERR(
+ "Unable to allocate event object");
+ if (!loop)
+ return num_tx;
+ frames_to_send = loop;
+ goto send_partial;
+ }
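+ /* Carry the event as the FD payload: copy it and point the FD at the copy */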
+ rte_memcpy(ev_temp, event, sizeof(struct rte_event));
+ DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
+ DPAA2_SET_FD_LEN((&fd_arr[loop]),
+ sizeof(struct rte_event));
+ }
+send_partial:
+ loop = 0;
+ retry_count = 0;
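+ /* Ring the enqueues; retry on a full EQCR until the retry budget runs out */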
+ while (loop < frames_to_send) {
+ ret = qbman_swp_enqueue_multiple_desc(swp,
+ &eqdesc[loop], &fd_arr[loop],
+ frames_to_send - loop);
+ if (unlikely(ret < 0)) {
+ retry_count++;
+ if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
+ num_tx += loop;
+ nb_events -= loop;
+ return num_tx;
+ }
+ } else {
+ loop += ret;
+ retry_count = 0;
+ }
+ }
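+ /* Batch fully enqueued; update the running totals for the next iteration */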
+ num_tx += loop;
+ nb_events -= loop;
+ }
+
+ return num_tx;
+err:
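+ /* Unwind the channel mappings set up before the failure */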
+ for (n = 0; n < i; n++) {
+ evq_info = &dpaa2_portal->evq_info[n];
+ if (!evq_info->event_port)
+ continue;
+ qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
+ dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
+ dpio_dev->token,
+ evq_info->dpcon->dpcon_id);
+ }