net/dpaa2: support new ethdev offload APIs
author    Sunil Kumar Kori <sunil.kori@nxp.com>
          Wed, 11 Apr 2018 11:05:40 +0000 (16:35 +0530)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 13 Apr 2018 22:43:30 +0000 (00:43 +0200)
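Replace the deprecated rxmode bit-fields (hw_vlan_filter, hw_vlan_extend,
hw_ip_checksum, jumbo_frame) with the per-port DEV_RX_OFFLOAD_* and
DEV_TX_OFFLOAD_* flags. The driver now advertises its Rx/Tx offload
capabilities in dev_info_get, rejects unsupported offloads at configure
time, programs L3 and L4 checksum offloads separately for Rx and Tx, and
performs software VLAN insertion in the Tx burst path when
DEV_TX_OFFLOAD_VLAN_INSERT is requested.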
Signed-off-by: Sunil Kumar Kori <sunil.kori@nxp.com>
drivers/net/dpaa2/dpaa2_ethdev.c
drivers/net/dpaa2/dpaa2_rxtx.c
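
For reference, a minimal sketch of how an application opts in to these
offloads through the new API. The helper name, port id, and queue counts
are illustrative, not part of this patch:

#include <rte_ethdev.h>

/* Sketch only: request a subset of the offloads this patch advertises,
 * masked against what the PMD actually reports, then configure the port
 * with one Rx and one Tx queue. Error handling and queue setup omitted. */
static int
configure_dpaa2_port(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_conf conf = { 0 };

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Keep only the offloads the PMD supports. */
	conf.rxmode.offloads = dev_info.rx_offload_capa &
		(DEV_RX_OFFLOAD_IPV4_CKSUM |
		 DEV_RX_OFFLOAD_VLAN_FILTER);
	conf.txmode.offloads = dev_info.tx_offload_capa &
		(DEV_TX_OFFLOAD_IPV4_CKSUM |
		 DEV_TX_OFFLOAD_VLAN_INSERT);

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}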

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 4225339..54ab9eb 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -103,7 +103,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        goto next_mask;
                }
 
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
@@ -114,7 +115,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 next_mask:
        if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_EXTEND)
                        DPAA2_PMD_INFO("VLAN extend offload not supported");
        }
 
@@ -172,13 +174,20 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_RX_OFFLOAD_IPV4_CKSUM |
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
+               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_VLAN_FILTER |
+               DEV_RX_OFFLOAD_VLAN_STRIP |
+               DEV_RX_OFFLOAD_JUMBO_FRAME |
+               DEV_RX_OFFLOAD_SCATTER;
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM |
                DEV_TX_OFFLOAD_UDP_CKSUM |
                DEV_TX_OFFLOAD_TCP_CKSUM |
                DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+               DEV_TX_OFFLOAD_VLAN_INSERT |
+               DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+               DEV_TX_OFFLOAD_MULTI_SEGS;
        dev_info->speed_capa = ETH_LINK_SPEED_1G |
                        ETH_LINK_SPEED_2_5G |
                        ETH_LINK_SPEED_10G;
@@ -268,12 +277,33 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-       int rx_ip_csum_offload = false;
+       struct rte_eth_dev_info dev_info;
+       uint64_t rx_offloads = eth_conf->rxmode.offloads;
+       uint64_t tx_offloads = eth_conf->txmode.offloads;
+       int rx_l3_csum_offload = false;
+       int rx_l4_csum_offload = false;
+       int tx_l3_csum_offload = false;
+       int tx_l4_csum_offload = false;
        int ret;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (eth_conf->rxmode.jumbo_frame == 1) {
+       dpaa2_dev_info_get(dev, &dev_info);
+       if ((~(dev_info.rx_offload_capa) & rx_offloads) != 0) {
+               DPAA2_PMD_ERR("Some Rx offloads are not supported "
+                       "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                       rx_offloads, dev_info.rx_offload_capa);
+               return -ENOTSUP;
+       }
+
+       if ((~(dev_info.tx_offload_capa) & tx_offloads) != 0) {
+               DPAA2_PMD_ERR("Some Tx offloads are not supported "
+                       "requested 0x%" PRIx64 " supported 0x%" PRIx64,
+                       tx_offloads, dev_info.tx_offload_capa);
+               return -ENOTSUP;
+       }
+
+       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
                        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
                                priv->token, eth_conf->rxmode.max_rx_pkt_len);
@@ -297,32 +327,44 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (eth_conf->rxmode.hw_ip_checksum)
-               rx_ip_csum_offload = true;
+       if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+               rx_l3_csum_offload = true;
+
+       if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+               (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+               rx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+                              DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
                return ret;
        }
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+                              DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
                return ret;
        }
 
+       if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+               tx_l3_csum_offload = true;
+
+       if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+               (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+               (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+               tx_l4_csum_offload = true;
+
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_TX_L3_CSUM, true);
+                              DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
                return ret;
        }
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_TX_L4_CSUM, true);
+                              DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
                return ret;
@@ -343,8 +385,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (eth_conf->rxmode.hw_vlan_filter)
-               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+       dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
        /* update the current status */
        dpaa2_dev_link_update(dev, 0);
@@ -949,9 +990,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        if (frame_size > ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               dev->data->dev_conf.rxmode.offloads |=
+                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               dev->data->dev_conf.rxmode.offloads &=
+                                               ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index d1cfe95..ef3a897 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -317,12 +317,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
        struct qbman_sge *sgt, *sge = NULL;
        int i;
 
-       if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-               int ret = rte_vlan_insert(&mbuf);
-               if (ret)
-                       return ret;
-       }
-
        temp = rte_pktmbuf_alloc(mbuf->pool);
        if (temp == NULL) {
                DPAA2_PMD_DP_DEBUG("No memory to allocate S/G table\n");
@@ -389,13 +383,6 @@ static void __attribute__ ((noinline)) __attribute__((hot))
 eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
 {
-       if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-               if (rte_vlan_insert(&mbuf)) {
-                       rte_pktmbuf_free(mbuf);
-                       return;
-               }
-       }
-
        DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
        DPAA2_PMD_DP_DEBUG("mbuf =%p, mbuf->buf_addr =%p, off = %d,"
@@ -428,12 +415,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
        struct rte_mbuf *m;
        void *mb = NULL;
 
-       if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
-               int ret = rte_vlan_insert(&mbuf);
-               if (ret)
-                       return ret;
-       }
-
        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                DPAA2_PMD_DP_DEBUG("Unable to allocated DPAA2 buffer\n");
@@ -737,8 +718,10 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                    priv->bp_list->dpaa2_ops_index &&
                                    (*bufs)->nb_segs == 1 &&
                                    rte_mbuf_refcnt_read((*bufs)) == 1)) {
-                                       if (unlikely((*bufs)->ol_flags
-                                               & PKT_TX_VLAN_PKT)) {
+                                       if (unlikely(((*bufs)->ol_flags
+                                               & PKT_TX_VLAN_PKT) ||
+                                               (dev->data->dev_conf.txmode.offloads
+                                               & DEV_TX_OFFLOAD_VLAN_INSERT))) {
                                                ret = rte_vlan_insert(bufs);
                                                if (ret)
                                                        goto send_n_return;
@@ -758,6 +741,13 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
                                goto send_n_return;
                        }
 
+                       if (unlikely(((*bufs)->ol_flags & PKT_TX_VLAN_PKT) ||
+                               (dev->data->dev_conf.txmode.offloads
+                               & DEV_TX_OFFLOAD_VLAN_INSERT))) {
+                               int ret = rte_vlan_insert(bufs);
+                               if (ret)
+                                       goto send_n_return;
+                       }
                        if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                                DPAA2_PMD_WARN("Non DPAA2 buffer pool");
                                /* alloc should be from the default buffer pool