diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 9fb6c5f..c17f6eb 100644
@@ -179,6 +179,9 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                        L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
+               if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
+                                       L3_PROTO_ESP_PRESENT))
+                       pkt_type |= RTE_PTYPE_TUNNEL_ESP;
 
        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                  L3_IPV6_N_PRESENT)) {
@@ -186,6 +189,9 @@ dpaa2_dev_rx_parse_slow(struct rte_mbuf *mbuf,
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
+               if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_IPSEC_ESP_PRESENT |
+                                       L3_PROTO_ESP_PRESENT))
+                       pkt_type |= RTE_PTYPE_TUNNEL_ESP;
        } else {
                goto parse_done;
        }
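
On the application side, the new classification can be read straight from mbuf->packet_type. A minimal sketch (hypothetical helper, not part of this patch), assuming only the standard rte_mbuf_ptype.h macros:

    #include <rte_mbuf.h>
    #include <rte_mbuf_ptype.h>

    /* Count ESP frames in a received burst (illustrative only). */
    static unsigned int
    count_esp(struct rte_mbuf **pkts, uint16_t nb_pkts)
    {
            unsigned int esp = 0;
            uint16_t i;

            for (i = 0; i < nb_pkts; i++)
                    if ((pkts[i]->packet_type & RTE_PTYPE_TUNNEL_MASK) ==
                                    RTE_PTYPE_TUNNEL_ESP)
                            esp++;
            return esp;
    }
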
@@ -312,6 +318,10 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
                        dpaa2_dev_rx_parse(first_seg, hw_annot_addr);
 
        rte_mbuf_refcnt_set(first_seg, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       rte_mempool_check_cookies(rte_mempool_from_obj((void *)first_seg),
+                       (void **)&first_seg, 1, 1);
+#endif
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
                sge = &sgt[i++];
@@ -324,6 +334,10 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
                next_seg->data_len  = sge->length  & 0x1FFFF;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(next_seg, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+               rte_mempool_check_cookies(rte_mempool_from_obj((void *)next_seg),
+                               (void **)&next_seg, 1, 1);
+#endif
                cur_seg->next = next_seg;
                next_seg->next = NULL;
                cur_seg = next_seg;
@@ -331,6 +345,10 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd,
        temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        rte_mbuf_refcnt_set(temp, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+                       (void **)&temp, 1, 1);
+#endif
        rte_pktmbuf_free_seg(temp);
 
        return (void *)first_seg;
@@ -356,6 +374,10 @@ eth_fd_to_mbuf(const struct qbman_fd *fd,
        mbuf->port = port_id;
        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
+                       (void **)&mbuf, 1, 1);
+#endif
 
        /* Parse the packet */
        /* parse results for LX2 are there in FRC field of FD.
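
The rte_mempool_check_cookies() calls added throughout this patch follow the mempool debug convention for the last argument: 1 on the Rx paths (the buffer is expected to be free from the pool's point of view and is marked allocated as it becomes an mbuf), and 0 on the Tx paths (the mbuf is expected to be allocated and is marked free once its buffer is handed to hardware). A minimal sketch of the two directions, with hypothetical helper names:

    #include <rte_common.h>
    #include <rte_mbuf.h>
    #include <rte_mempool.h>

    /* Rx direction: a hardware buffer leaves the pool and becomes an mbuf. */
    static inline void
    debug_mark_allocated(struct rte_mbuf *m)
    {
            RTE_SET_USED(m);
    #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
            /* free == 1: object is supposed to be free; mark it allocated. */
            rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
                            (void **)&m, 1, 1);
    #endif
    }

    /* Tx direction: the mbuf's backing buffer is handed to hardware. */
    static inline void
    debug_mark_freed(struct rte_mbuf *m)
    {
            RTE_SET_USED(m);
    #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
            /* free == 0: object is supposed to be allocated; mark it free. */
            rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
                            (void **)&m, 1, 0);
    #endif
    }
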
@@ -404,6 +426,10 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                        rte_mbuf_refcnt_update(temp, -1);
                } else {
                        DPAA2_SET_ONLY_FD_BPID(fd, bpid);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+                       rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+                                       (void **)&temp, 1, 0);
+#endif
                }
                DPAA2_SET_FD_OFFSET(fd, offset);
        } else {
@@ -414,6 +440,10 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                }
                DPAA2_SET_ONLY_FD_BPID(fd, bpid);
                DPAA2_SET_FD_OFFSET(fd, temp->data_off);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+               rte_mempool_check_cookies(rte_mempool_from_obj((void *)temp),
+                       (void **)&temp, 1, 0);
+#endif
        }
        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
@@ -450,6 +480,10 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                                } else {
                                        DPAA2_SET_FLE_BPID(sge,
                                                mempool_to_bpid(cur_seg->pool));
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+                                       rte_mempool_check_cookies(rte_mempool_from_obj((void *)cur_seg),
+                                               (void **)&cur_seg, 1, 0);
+#endif
                                }
                        }
                        cur_seg = cur_seg->next;
@@ -500,6 +534,11 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(mbuf, -1);
                }
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+               else
+                       rte_mempool_check_cookies(rte_mempool_from_obj((void *)mbuf),
+                               (void **)&mbuf, 1, 0);
+#endif
        } else if (RTE_MBUF_HAS_EXTBUF(mbuf)) {
                DPAA2_SET_FD_IVP(fd);
        } else {
@@ -539,6 +578,10 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 
        DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
 
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+       rte_mempool_check_cookies(rte_mempool_from_obj((void *)m),
+               (void **)&m, 1, 0);
+#endif
        DPAA2_PMD_DP_DEBUG(
                "mbuf: %p, BMAN buf addr: %p, fdaddr: %" PRIx64 ", bpid: %d,"
                " meta: %d, off: %d, len: %d\n",
@@ -1260,6 +1303,11 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                        }
                                        DPAA2_MBUF_TO_CONTIG_FD((*bufs),
                                        &fd_arr[loop], mempool_to_bpid(mp));
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
+                                       rte_mempool_check_cookies
+                                               (rte_mempool_from_obj((void *)*bufs),
+                                               (void **)bufs, 1, 0);
+#endif
                                        bufs++;
 #ifdef RTE_LIBRTE_IEEE1588
                                        enable_tx_tstamp(&fd_arr[loop]);
@@ -1408,7 +1456,8 @@ skip_tx:
 }
 
 void
-dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci)
+dpaa2_dev_free_eqresp_buf(uint16_t eqresp_ci,
+                         __rte_unused struct dpaa2_queue *dpaa2_q)
 {
        struct dpaa2_dpio_dev *dpio_dev = DPAA2_PER_LCORE_DPIO;
        struct qbman_fd *fd;
@@ -1468,6 +1517,147 @@ dpaa2_set_enqueue_descriptor(struct dpaa2_queue *dpaa2_q,
        *dpaa2_seqn(m) = DPAA2_INVALID_MBUF_SEQN;
 }
 
+uint16_t
+dpaa2_dev_tx_multi_txq_ordered(void **queue,
+               struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+       /* Transmit each frame on the Tx queue supplied for it in queue[]. */
+       uint32_t loop, retry_count;
+       int32_t ret;
+       struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
+       uint32_t frames_to_send;
+       struct rte_mempool *mp;
+       struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
+       struct dpaa2_queue *dpaa2_q[MAX_TX_RING_SLOTS];
+       struct qbman_swp *swp;
+       uint16_t bpid;
+       struct rte_mbuf *mi;
+       struct rte_eth_dev_data *eth_data;
+       struct dpaa2_dev_priv *priv;
+       struct dpaa2_queue *order_sendq;
+
+       if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+               ret = dpaa2_affine_qbman_swp();
+               if (ret) {
+                       DPAA2_PMD_ERR(
+                               "Failed to allocate IO portal, tid: %d\n",
+                               rte_gettid());
+                       return 0;
+               }
+       }
+       swp = DPAA2_PER_LCORE_PORTAL;
+
+       for (loop = 0; loop < nb_pkts; loop++) {
+               dpaa2_q[loop] = (struct dpaa2_queue *)queue[loop];
+               eth_data = dpaa2_q[loop]->eth_data;
+               priv = eth_data->dev_private;
+               qbman_eq_desc_clear(&eqdesc[loop]);
+               if (*dpaa2_seqn(*bufs) && priv->en_ordered) {
+                       order_sendq = (struct dpaa2_queue *)priv->tx_vq[0];
+                       dpaa2_set_enqueue_descriptor(order_sendq,
+                                                            (*bufs),
+                                                            &eqdesc[loop]);
+               } else {
+                       qbman_eq_desc_set_no_orp(&eqdesc[loop],
+                                                        DPAA2_EQ_RESP_ERR_FQ);
+                       qbman_eq_desc_set_fq(&eqdesc[loop],
+                                                    dpaa2_q[loop]->fqid);
+               }
+
+               retry_count = 0;
+               while (qbman_result_SCN_state(dpaa2_q[loop]->cscn)) {
+                       retry_count++;
+                       /* Retry for some time before giving up */
+                       if (retry_count > CONG_RETRY_COUNT)
+                               goto send_frames;
+               }
+
+               if (likely(RTE_MBUF_DIRECT(*bufs))) {
+                       mp = (*bufs)->pool;
+                       /* Check the basic scenario and set
+                        * the FD appropriately here itself.
+                        */
+                       if (likely(mp && mp->ops_index ==
+                               priv->bp_list->dpaa2_ops_index &&
+                               (*bufs)->nb_segs == 1 &&
+                               rte_mbuf_refcnt_read((*bufs)) == 1)) {
+                               if (unlikely((*bufs)->ol_flags
+                                       & RTE_MBUF_F_TX_VLAN)) {
+                                       ret = rte_vlan_insert(bufs);
+                                       if (ret)
+                                               goto send_frames;
+                               }
+                               DPAA2_MBUF_TO_CONTIG_FD((*bufs),
+                                       &fd_arr[loop],
+                                       mempool_to_bpid(mp));
+                               bufs++;
+                               continue;
+                       }
+               } else {
+                       mi = rte_mbuf_from_indirect(*bufs);
+                       mp = mi->pool;
+               }
+               /* Not a hw_pkt pool allocated frame */
+               if (unlikely(!mp || !priv->bp_list)) {
+                       DPAA2_PMD_ERR("Err: No buffer pool attached");
+                       goto send_frames;
+               }
+
+               if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
+                       DPAA2_PMD_WARN("Non DPAA2 buffer pool");
+                       /* alloc should be from the default buffer pool
+                        * attached to this interface
+                        */
+                       bpid = priv->bp_list->buf_pool.bpid;
+
+                       if (unlikely((*bufs)->nb_segs > 1)) {
+                               DPAA2_PMD_ERR(
+                                       "S/G not supp for non hw offload buffer");
+                               goto send_frames;
+                       }
+                       if (eth_copy_mbuf_to_fd(*bufs,
+                                               &fd_arr[loop], bpid)) {
+                               goto send_frames;
+                       }
+                       /* free the original packet */
+                       rte_pktmbuf_free(*bufs);
+               } else {
+                       bpid = mempool_to_bpid(mp);
+                       if (unlikely((*bufs)->nb_segs > 1)) {
+                               if (eth_mbuf_to_sg_fd(*bufs,
+                                                     &fd_arr[loop],
+                                                     mp,
+                                                     bpid))
+                                       goto send_frames;
+                       } else {
+                               eth_mbuf_to_fd(*bufs,
+                                              &fd_arr[loop], bpid);
+                       }
+               }
+
+               bufs++;
+       }
+
+send_frames:
+       frames_to_send = loop;
+       loop = 0;
+       retry_count = 0;
+       while (loop < frames_to_send) {
+               ret = qbman_swp_enqueue_multiple_desc(swp, &eqdesc[loop],
+                               &fd_arr[loop],
+                               frames_to_send - loop);
+               if (likely(ret > 0)) {
+                       loop += ret;
+               } else {
+                       retry_count++;
+                       if (retry_count > DPAA2_MAX_TX_RETRY_COUNT)
+                               break;
+               }
+       }
+
+       return loop;
+}
+
 /* Callback to handle sending ordered packets through WRIOP based interface */
 uint16_t
 dpaa2_dev_tx_ordered(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
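
For context, dpaa2_dev_tx_multi_txq_ordered() above pairs each mbuf with its own destination queue, unlike the single-queue ordered burst that follows. A minimal sketch of a caller (hypothetical function, assuming the driver-internal declaration from dpaa2_ethdev.h):

    /* Send two frames, each to its own Tx queue, in one ordered burst. */
    static void
    send_to_two_queues(void *txq0, void *txq1,
                       struct rte_mbuf *m0, struct rte_mbuf *m1)
    {
            void *queues[2] = { txq0, txq1 };     /* queue[i] carries bufs[i] */
            struct rte_mbuf *bufs[2] = { m0, m1 };
            uint16_t sent;

            sent = dpaa2_dev_tx_multi_txq_ordered(queues, bufs, 2);

            /* Per the usual tx_burst convention, frames at index >= sent were
             * not enqueued and remain owned by the caller.
             */
            while (sent < 2)
                    rte_pktmbuf_free(bufs[sent++]);
    }
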
@@ -1660,31 +1851,6 @@ skip_tx:
        return num_tx;
 }
 
-/**
- * Dummy DPDK callback for TX.
- *
- * This function is used to temporarily replace the real callback during
- * unsafe control operations on the queue, or in case of error.
- *
- * @param dpdk_txq
- *   Generic pointer to TX queue structure.
- * @param[in] pkts
- *   Packets to transmit.
- * @param pkts_n
- *   Number of packets in array.
- *
- * @return
- *   Number of packets successfully transmitted (<= pkts_n).
- */
-uint16_t
-dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
-{
-       (void)queue;
-       (void)bufs;
-       (void)nb_pkts;
-       return 0;
-}
-
 #if defined(RTE_TOOLCHAIN_GCC)
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wcast-qual"