net/dpaa2: support multiple Tx queues enqueue for ordered
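
This change adds dpaa2_dev_tx_multi_txq_ordered(), which enqueues a burst
of frames where each packet carries its own Tx queue handle, so ordered-mode
traffic can target multiple Tx queues in a single call. It also adopts the
RTE_MBUF_F_* / RTE_ETH_* offload flag names and latches IEEE1588 timestamps
only for mbufs that carry the timestamp flag.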
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index f40369e..81b28e2 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -114,7 +114,7 @@ dpaa2_dev_rx_parse_new(struct rte_mbuf *m, const struct qbman_fd *fd,
                m->packet_type = dpaa2_dev_rx_parse_slow(m, annotation);
        }
        m->hash.rss = fd->simple.flc_hi;
-       m->ol_flags |= PKT_RX_RSS_HASH;
+       m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
 
        if (dpaa2_enable_ts[m->port]) {
                *dpaa2_timestamp_dynfield(m) = annotation->word2;
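
dpaa2_timestamp_dynfield() above stores into the mbuf dynamic timestamp
field. A minimal app-side sketch of registering and reading that field,
assuming the PMD uses the generic rte_mbuf_dyn Rx timestamp registration
(names below are illustrative):

	#include <rte_mbuf.h>
	#include <rte_mbuf_dyn.h>

	static int ts_offset;       /* byte offset of the dynamic field */
	static uint64_t ts_rx_flag; /* ol_flags bit: "Rx timestamp valid" */

	/* Register the shared Rx timestamp dynfield/dynflag once at init. */
	static int
	rx_timestamp_setup(void)
	{
		return rte_mbuf_dyn_rx_timestamp_register(&ts_offset,
							  &ts_rx_flag);
	}

	/* Read the timestamp from a received mbuf, if marked valid. */
	static uint64_t
	rx_timestamp_get(const struct rte_mbuf *m)
	{
		if (!(m->ol_flags & ts_rx_flag))
			return 0;
		return *RTE_MBUF_DYNFIELD(m, ts_offset,
					  rte_mbuf_timestamp_t *);
	}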
@@ -140,21 +140,23 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
                        annotation->word3, annotation->word4);
 
 #if defined(RTE_LIBRTE_IEEE1588)
-       if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP))
-               mbuf->ol_flags |= PKT_RX_IEEE1588_PTP;
+       if (BIT_ISSET_AT_POS(annotation->word1, DPAA2_ETH_FAS_PTP)) {
+               mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_TMST;
+       }
 #endif
 
        if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_1_PRESENT)) {
                vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
                        (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
                mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
-               mbuf->ol_flags |= PKT_RX_VLAN;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
                pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_VLAN_N_PRESENT)) {
                vlan_tci = rte_pktmbuf_mtod_offset(mbuf, uint16_t *,
                        (VLAN_TCI_OFFSET_1(annotation->word5) >> 16));
                mbuf->vlan_tci = rte_be_to_cpu_16(*vlan_tci);
-               mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_QINQ;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_QINQ;
                pkt_type |= RTE_PTYPE_L2_ETHER_QINQ;
        }
 
@@ -189,9 +191,9 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
        }
 
        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
-               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
        else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
-               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 
        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
@@ -232,9 +234,9 @@ dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, void *hw_annot_addr)
                           annotation->word4);
 
        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
-               mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
        else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
-               mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+               mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
 
        if (dpaa2_enable_ts[mbuf->port]) {
                *dpaa2_timestamp_dynfield(mbuf) = annotation->word2;
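
The checksum results set above are consumed by applications after
rte_eth_rx_burst(). A short sketch dropping frames flagged with a bad
L3/L4 checksum (helper name is illustrative):

	/* Drop frames the hardware flagged with a bad checksum. */
	static uint16_t
	drop_bad_csum(struct rte_mbuf **pkts, uint16_t nb)
	{
		uint16_t i, kept = 0;

		for (i = 0; i < nb; i++) {
			if (pkts[i]->ol_flags & (RTE_MBUF_F_RX_IP_CKSUM_BAD |
						 RTE_MBUF_F_RX_L4_CKSUM_BAD))
				rte_pktmbuf_free(pkts[i]);
			else
				pkts[kept++] = pkts[i];
		}
		return kept;
	}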
@@ -714,7 +716,7 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
        rte_prefetch0((void *)(size_t)(dq_storage + 1));
 
        /* Prepare next pull descriptor. This will give space for the
-        * prefething done on DQRR entries
+        * prefetching done on DQRR entries
         */
        q_storage->toggle ^= 1;
        dq_storage1 = q_storage->dq_storage[q_storage->toggle];
@@ -769,11 +771,14 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                else
                        bufs[num_rx] = eth_fd_to_mbuf(fd, eth_data->port_id);
 #if defined(RTE_LIBRTE_IEEE1588)
-               priv->rx_timestamp = *dpaa2_timestamp_dynfield(bufs[num_rx]);
+               if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
+                       priv->rx_timestamp =
+                               *dpaa2_timestamp_dynfield(bufs[num_rx]);
+               }
 #endif
 
                if (eth_data->dev_conf.rxmode.offloads &
-                               DEV_RX_OFFLOAD_VLAN_STRIP)
+                               RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
                        rte_vlan_strip(bufs[num_rx]);
 
                dq_storage++;
@@ -986,8 +991,15 @@ dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                bufs[num_rx] = eth_fd_to_mbuf(fd,
                                                        eth_data->port_id);
 
+#if defined(RTE_LIBRTE_IEEE1588)
+               if (bufs[num_rx]->ol_flags & RTE_MBUF_F_RX_IEEE1588_TMST) {
+                       priv->rx_timestamp =
+                               *dpaa2_timestamp_dynfield(bufs[num_rx]);
+               }
+#endif
+
                if (eth_data->dev_conf.rxmode.offloads &
-                               DEV_RX_OFFLOAD_VLAN_STRIP) {
+                               RTE_ETH_RX_OFFLOAD_VLAN_STRIP) {
                        rte_vlan_strip(bufs[num_rx]);
                }
 
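
The RTE_ETH_RX_OFFLOAD_VLAN_STRIP branch above only runs when the
application requested the offload at configure time, along these lines
(port and queue counts are illustrative):

	struct rte_eth_conf port_conf = { 0 };
	int ret;

	port_conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
	/* 1 Rx queue, 1 Tx queue on port 0 */
	ret = rte_eth_dev_configure(0, 1, 1, &port_conf);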
@@ -1021,6 +1033,8 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
        struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
        struct dpaa2_dev_priv *priv = eth_data->dev_private;
        struct dpaa2_annot_hdr *annotation;
+       void *v_addr;
+       struct rte_mbuf *mbuf;
 #endif
 
        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
@@ -1105,10 +1119,16 @@ uint16_t dpaa2_dev_tx_conf(void *queue)
                        num_tx_conf++;
                        num_pulled++;
 #if defined(RTE_LIBRTE_IEEE1588)
-                       annotation = (struct dpaa2_annot_hdr *)((size_t)
-                               DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
-                               DPAA2_FD_PTA_SIZE);
-                       priv->tx_timestamp = annotation->word2;
+                       v_addr = DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));
+                       mbuf = DPAA2_INLINE_MBUF_FROM_BUF(v_addr,
+                               rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
+
+                       if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+                               annotation = (struct dpaa2_annot_hdr *)((size_t)
+                                       DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)) +
+                                       DPAA2_FD_PTA_SIZE);
+                               priv->tx_timestamp = annotation->word2;
+                       }
 #endif
                } while (pending);
 
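
With this change the Tx-confirmation path latches a timestamp only for
frames sent with the IEEE1588 request flag. On the application side that
typically looks like the following sketch, assuming the PMD exposes the
latched value through the ethdev timesync API:

	struct timespec ts;

	rte_eth_timesync_enable(port_id);

	m->ol_flags |= RTE_MBUF_F_TX_IEEE1588_TMST; /* request Tx timestamp */
	rte_eth_tx_burst(port_id, 0, &m, 1);

	/* Poll until the PMD has captured the Tx timestamp. */
	while (rte_eth_timesync_read_tx_timestamp(port_id, &ts) < 0)
		rte_pause();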
@@ -1184,8 +1204,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
         * corresponding to last packet transmitted for reading
         * the timestamp
         */
-       priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
-       dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+       if ((*bufs)->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST) {
+               priv->next_tx_conf_queue = dpaa2_q->tx_conf_queue;
+               dpaa2_dev_tx_conf(dpaa2_q->tx_conf_queue);
+               priv->tx_timestamp = 0;
+       }
 #endif
 
        /*Prepare enqueue descriptor*/
@@ -1228,9 +1251,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                    (*bufs)->nb_segs == 1 &&
                                    rte_mbuf_refcnt_read((*bufs)) == 1)) {
                                        if (unlikely(((*bufs)->ol_flags
-                                               & PKT_TX_VLAN_PKT) ||
+                                               & RTE_MBUF_F_TX_VLAN) ||
                                                (eth_data->dev_conf.txmode.offloads
-                                               & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+                                               & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
                                                ret = rte_vlan_insert(bufs);
                                                if (ret)
                                                        goto send_n_return;
@@ -1271,9 +1294,9 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                goto send_n_return;
                        }
 
-                       if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+                       if (unlikely(((*bufs)->ol_flags & RTE_MBUF_F_TX_VLAN) ||
                                (eth_data->dev_conf.txmode.offloads
-                               & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+                               & RTE_ETH_TX_OFFLOAD_VLAN_INSERT))) {
                                int ret = rte_vlan_insert(bufs);
                                if (ret)
                                        goto send_n_return;
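
The RTE_MBUF_F_TX_VLAN branch above inserts the tag in software via
rte_vlan_insert(); a sender requests it per packet like this (port and
queue ids illustrative):

	/* Request VLAN tag insertion on transmit (VLAN id 100). */
	m->vlan_tci = 100;
	m->ol_flags |= RTE_MBUF_F_TX_VLAN;
	rte_eth_tx_burst(port_id, queue_id, &m, 1);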
@@ -1445,6 +1468,148 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
        *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
 }
 
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+               struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+       /* Transmit each frame to the Tx queue supplied for it in queue[]. */
+       uint32_t loop, retry_count;
+       int32_t ret;
+       struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+       uint32_t frames_to_send;
+       struct rte_mempool *mp;
+       struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+       struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+       struct qbman_swp *swp;
+       uint16_t bpid;
+       struct rte_mbuf *mi;
+       struct rte_eth_dev_data *eth_data;
+       struct dpaa2_dev_priv *priv;
+       struct dpaa2_queue *order_sendq;
+
+       if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       DPAA2_PMD_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
+                       return 0;
+               }
+       }
+       swp = DPAA2_PER_LCORE_PORTAL;
+
+       for (loop = 0; loop < nb_pkts; loop++) {
+               dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+               eth_data = dpaa2_q[loop]->eth_data;
+               priv = eth_data->dev_private;
+               qbman_eq_desc_clear(&eqdesc[loop]);
+               if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+                       order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+                       dpaa2_set_enqueue_descriptor(order_sendq,
+                                                            (*bufs),
+                                                            &eqdesc[loop]);
+               } else {
+                       qbman_eq_desc_set_no_orp(&eqdesc[loop],
+                                                        DPAA2_EQ_RESP_ERR_FQ);
+                       qbman_eq_desc_set_fq(&eqdesc[loop],
+                                                    dpaa2_q[loop]->fqid);
+               }
+
+               retry_count = 0;
+               while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+                       retry_count++;
+                       /* Retry for some time before giving up */
+                       if (retry_count > CONG_RETRY_COUNT)
+                               goto send_frames;
+               }
+
+               if (likely(RTE_MBUF_DIRECT(*bufs))) {
+                       mp = (*bufs)->pool;
+                       /* Check the basic scenario and set
+                        * the FD appropriately here itself.
+                        */
+                       if (likely(mp && mp->ops_index ==
+                               priv->bp_list->dpaa2_ops_index &&
+                               (*bufs)->nb_segs == 1 &&
+                               rte_mbuf_refcnt_read((*bufs)) == 1)) {
+                               if (unlikely((*bufs)->ol_flags
+                                       & RTE_MBUF_F_TX_VLAN)) {
+                                       ret = rte_vlan_insert(bufs);
+                                       if (ret)
+                                               goto send_frames;
+                               }
+                               DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+                                       &fd_arr[loop],
+                                       mempool_to_bpid(mp));
+                               bufs++;
+                               dpaa2_q[loop]++;
+                               continue;
+                       }
+               } else {
+                       mi = rte_mbuf_from_indirect(*bufs);
+                       mp = mi->pool;
+               }
+               /* Not a hw_pkt pool allocated frame */
+               if (unlikely(!mp || !priv->bp_list)) {
+                       DPAA2_PMD_ERR("Err: No buffer pool attached");
+                       goto send_frames;
+               }
+
+               if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+                       DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+                       /* alloc should be from the default buffer pool
+                        * attached to this interface
+                        */
+                       bpid = priv->bp_list->buf_pool.bpid;
+
+                       if (unlikely((*bufs)->nb_segs > 1)) {
+                               DPAA2_PMD_ERR(
+                                       "S/G not supp for non hw offload buffer");
+                               goto send_frames;
+                       }
+                       if (eth_copy_mbuf_to_fd(*bufs,
+                                               &fd_arr[loop], bpid)) {
+                               goto send_frames;
+                       }
+                       /* free the original packet */
+                       rte_pktmbuf_free(*bufs);
+               } else {
+                       bpid = mempool_to_bpid(mp);
+                       if (unlikely((*bufs)->nb_segs > 1)) {
+                               if (eth_mbuf_to_sg_fd(*bufs,
+                                                     &fd_arr[loop],
+                                                     mp,
+                                                     bpid))
+                                       goto send_frames;
+                       } else {
+                               eth_mbuf_to_fd(*bufs,
+                                              &fd_arr[loop], bpid);
+                       }
+               }
+
+               bufs++;
+               dpaa2_q[loop]++;
+       }
+
+send_frames:
+       frames_to_send = loop;
+       loop = retry_count = 0;
+       while (loop < frames_to_send) {
+               ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+                               &fd_arr[loop],
+                               frames_to_send - loop);
+               if (likely(ret > 0)) {
+                       loop += ret;
+               } else {
+                       retry_count++;
+                       if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+                               break;
+               }
+       }
+
+       return loop;
+}
+
 /* Callback to handle sending ordered packets through WRIOP based interface */
 uint16_t
 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
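
A hedged caller sketch for the new burst API (names hypothetical): each
packet travels with its own queue handle, and frames beyond the returned
count were not accepted and remain owned by the caller:

	void *txqs[MAX_TX_RING_SLOTS];            /* one queue handle per pkt */
	struct rte_mbuf *pkts[MAX_TX_RING_SLOTS];
	uint16_t i, nb, sent;

	/* ... fill pkts[0..nb-1] and the matching txqs[0..nb-1] ... */

	sent = dpaa2_dev_tx_multi_txq_ordered(txqs, pkts, nb);
	for (i = sent; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);        /* not enqueued: free or retry */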
@@ -1510,7 +1675,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                        if (*dpaa2_seqn(*bufs)) {
                                /* Use only queue 0 for Tx in case of atomic/
                                 * ordered packets as packets can get unordered
-                                * when being tranmitted out from the interface
+                                * when being transmitted out from the interface
                                 */
                                dpaa2_set_enqueue_descriptor(order_sendq,
                                                             (*bufs),
@@ -1532,7 +1697,7 @@ dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                    (*bufs)->nb_segs == 1 &&
                                    rte_mbuf_refcnt_read((*bufs)) == 1)) {
                                        if (unlikely((*bufs)->ol_flags
-                                               & PKT_TX_VLAN_PKT)) {
+                                               & RTE_MBUF_F_TX_VLAN)) {
                                          ret = rte_vlan_insert(bufs);
                                          if (ret)
                                                goto send_n_return;
@@ -1738,7 +1903,7 @@ dpaa2_dev_loopback_rx(void *queue,
        rte_prefetch0((void *)(size_t)(dq_storage + 1));
 
        /* Prepare next pull descriptor. This will give space for the
-        * prefething done on DQRR entries
+        * prefetching done on DQRR entries
         */
        q_storage->toggle ^= 1;
        dq_storage1 = q_storage->dq_storage[q_storage->toggle];