diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 4225339..880034f 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
 #include "dpaa2_ethdev.h"
 #include <fsl_qbman_debug.h>
 
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+               DEV_RX_OFFLOAD_VLAN_STRIP |
+               DEV_RX_OFFLOAD_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_UDP_CKSUM |
+               DEV_RX_OFFLOAD_TCP_CKSUM |
+               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
+               DEV_RX_OFFLOAD_VLAN_FILTER |
+               DEV_RX_OFFLOAD_JUMBO_FRAME;
+
+/* Rx offloads which cannot be disabled */
+static uint64_t dev_rx_offloads_nodis =
+               DEV_RX_OFFLOAD_SCATTER;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+               DEV_TX_OFFLOAD_VLAN_INSERT |
+               DEV_TX_OFFLOAD_IPV4_CKSUM |
+               DEV_TX_OFFLOAD_UDP_CKSUM |
+               DEV_TX_OFFLOAD_TCP_CKSUM |
+               DEV_TX_OFFLOAD_SCTP_CKSUM |
+               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+
+/* Tx offloads which cannot be disabled */
+static uint64_t dev_tx_offloads_nodis =
+               DEV_TX_OFFLOAD_MULTI_SEGS |
+               DEV_TX_OFFLOAD_MT_LOCKFREE |
+               DEV_TX_OFFLOAD_MBUF_FAST_FREE;
+
 struct rte_dpaa2_xstats_name_off {
        char name[RTE_ETH_XSTATS_NAME_SIZE];
        uint8_t page_id; /* dpni statistics page id */
@@ -103,7 +132,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
                        goto next_mask;
                }
 
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_FILTER)
                        ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
                                                      priv->token, true);
                else
@@ -114,7 +144,8 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        }
 next_mask:
        if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+               if (dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_EXTEND)
                        DPAA2_PMD_INFO("VLAN extend offload not supported");
        }
 
@@ -168,20 +199,18 @@ dpaa2_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->min_rx_bufsize = DPAA2_MIN_RX_BUF_SIZE;
        dev_info->max_rx_queues = (uint16_t)priv->nb_rx_queues;
        dev_info->max_tx_queues = (uint16_t)priv->nb_tx_queues;
-       dev_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM |
-               DEV_RX_OFFLOAD_TCP_CKSUM |
-               DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM;
-       dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_IPV4_CKSUM |
-               DEV_TX_OFFLOAD_UDP_CKSUM |
-               DEV_TX_OFFLOAD_TCP_CKSUM |
-               DEV_TX_OFFLOAD_SCTP_CKSUM |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+       dev_info->rx_offload_capa = dev_rx_offloads_sup |
+                                       dev_rx_offloads_nodis;
+       dev_info->tx_offload_capa = dev_tx_offloads_sup |
+                                       dev_tx_offloads_nodis;
        dev_info->speed_capa = ETH_LINK_SPEED_1G |
                        ETH_LINK_SPEED_2_5G |
                        ETH_LINK_SPEED_10G;
+
+       dev_info->max_hash_mac_addrs = 0;
+       dev_info->max_vfs = 0;
+       dev_info->max_vmdq_pools = ETH_16_POOLS;
+       dev_info->flow_type_rss_offloads = DPAA2_RSS_OFFLOAD_ALL;
 }
 
 static int
@@ -268,12 +297,33 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
        struct dpaa2_dev_priv *priv = dev->data->dev_private;
        struct fsl_mc_io *dpni = priv->hw;
        struct rte_eth_conf *eth_conf = &dev->data->dev_conf;
-       int rx_ip_csum_offload = false;
+       uint64_t rx_offloads = eth_conf->rxmode.offloads;
+       uint64_t tx_offloads = eth_conf->txmode.offloads;
+       int rx_l3_csum_offload = false;
+       int rx_l4_csum_offload = false;
+       int tx_l3_csum_offload = false;
+       int tx_l4_csum_offload = false;
        int ret;
 
        PMD_INIT_FUNC_TRACE();
 
-       if (eth_conf->rxmode.jumbo_frame == 1) {
+       /* Rx offloads validation */
+       if (dev_rx_offloads_nodis & ~rx_offloads) {
+               DPAA2_PMD_WARN(
+               "Rx offloads non configurable - requested 0x%" PRIx64
+               " ignored 0x%" PRIx64,
+                       rx_offloads, dev_rx_offloads_nodis);
+       }
+
+       /* Tx offloads validation */
+       if (dev_tx_offloads_nodis & ~tx_offloads) {
+               DPAA2_PMD_WARN(
+               "Tx offloads non configurable - requested 0x%" PRIx64
+               " ignored 0x%" PRIx64,
+                       tx_offloads, dev_tx_offloads_nodis);
+       }
+
+       if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (eth_conf->rxmode.max_rx_pkt_len <= DPAA2_MAX_RX_PKT_LEN) {
                        ret = dpni_set_max_frame_length(dpni, CMD_PRI_LOW,
                                priv->token, eth_conf->rxmode.max_rx_pkt_len);
@@ -297,32 +347,44 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (eth_conf->rxmode.hw_ip_checksum)
-               rx_ip_csum_offload = true;
+       if (rx_offloads & DEV_RX_OFFLOAD_IPV4_CKSUM)
+               rx_l3_csum_offload = true;
+
+       if ((rx_offloads & DEV_RX_OFFLOAD_UDP_CKSUM) ||
+               (rx_offloads & DEV_RX_OFFLOAD_TCP_CKSUM))
+               rx_l4_csum_offload = true;
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_RX_L3_CSUM, rx_ip_csum_offload);
+                              DPNI_OFF_RX_L3_CSUM, rx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set RX l3 csum:Error = %d", ret);
                return ret;
        }
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_RX_L4_CSUM, rx_ip_csum_offload);
+                              DPNI_OFF_RX_L4_CSUM, rx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to get RX l4 csum:Error = %d", ret);
                return ret;
        }
 
+       if (tx_offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
+               tx_l3_csum_offload = true;
+
+       if ((tx_offloads & DEV_TX_OFFLOAD_UDP_CKSUM) ||
+               (tx_offloads & DEV_TX_OFFLOAD_TCP_CKSUM) ||
+               (tx_offloads & DEV_TX_OFFLOAD_SCTP_CKSUM))
+               tx_l4_csum_offload = true;
+
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_TX_L3_CSUM, true);
+                              DPNI_OFF_TX_L3_CSUM, tx_l3_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to set TX l3 csum:Error = %d", ret);
                return ret;
        }
 
        ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
-                              DPNI_OFF_TX_L4_CSUM, true);
+                              DPNI_OFF_TX_L4_CSUM, tx_l4_csum_offload);
        if (ret) {
                DPAA2_PMD_ERR("Error to get TX l4 csum:Error = %d", ret);
                return ret;
@@ -343,8 +405,7 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
                }
        }
 
-       if (eth_conf->rxmode.hw_vlan_filter)
-               dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+       dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
        /* update the current status */
        dpaa2_dev_link_update(dev, 0);
@@ -949,9 +1010,11 @@ dpaa2_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        if (frame_size > ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               dev->data->dev_conf.rxmode.offloads |=
+                                               DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               dev->data->dev_conf.rxmode.offloads &=
+                                               ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
@@ -1851,7 +1914,7 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
        eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
        eth_dev->tx_pkt_burst = dpaa2_dev_tx;
 
-       DPAA2_PMD_INFO("%s: netdev created", eth_dev->data->name);
+       RTE_LOG(INFO, PMD, "%s: netdev created\n", eth_dev->data->name);
        return 0;
 init_err:
        dpaa2_dev_uninit(eth_dev);
@@ -1944,7 +2007,6 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
        }
 
        eth_dev->device = &dpaa2_dev->device;
-       eth_dev->device->driver = &dpaa2_drv->driver;
 
        dpaa2_dev->eth_dev = eth_dev;
        eth_dev->data->rx_mbuf_alloc_failed = 0;
@@ -1954,8 +2016,10 @@ rte_dpaa2_probe(struct rte_dpaa2_driver *dpaa2_drv,
 
        /* Invoke PMD device initialization function */
        diag = dpaa2_dev_init(eth_dev);
-       if (diag == 0)
+       if (diag == 0) {
+               rte_eth_dev_probing_finish(eth_dev);
                return 0;
+       }
 
        if (rte_eal_process_type() == RTE_PROC_PRIMARY)
                rte_free(eth_dev->data->dev_private);
@@ -1987,9 +2051,7 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
 
 RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
 
-RTE_INIT(dpaa2_pmd_init_log);
-static void
-dpaa2_pmd_init_log(void)
+RTE_INIT(dpaa2_pmd_init_log)
 {
        dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
        if (dpaa2_logtype_pmd >= 0)
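
With this conversion, offloads that were previously toggled through the rxmode
bit-fields (hw_ip_checksum, hw_vlan_filter, jumbo_frame) are requested through
the rxmode.offloads and txmode.offloads masks advertised in dev_info. A minimal
application-side sketch of that flow, using only flag names already present in
this patch; the helper name, port id, queue counts and selected flags are
illustrative, not taken from the patch:

    #include <rte_ethdev.h>

    /* Hypothetical helper: request a subset of the offloads this PMD
     * advertises, via the per-port offload masks.
     */
    static int
    configure_port_offloads(uint16_t port_id)
    {
            struct rte_eth_conf conf = {
                    .rxmode = {
                            .offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
                                        DEV_RX_OFFLOAD_UDP_CKSUM |
                                        DEV_RX_OFFLOAD_TCP_CKSUM,
                    },
                    .txmode = {
                            .offloads = DEV_TX_OFFLOAD_IPV4_CKSUM |
                                        DEV_TX_OFFLOAD_UDP_CKSUM |
                                        DEV_TX_OFFLOAD_TCP_CKSUM,
                    },
            };

            /* One Rx and one Tx queue; non-disableable offloads such as
             * DEV_RX_OFFLOAD_SCATTER stay enabled and only trigger the
             * warning added above when they are left out of the request.
             */
            return rte_eth_dev_configure(port_id, 1, 1, &conf);
    }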