app/testpmd: setup DCB forwarding based on traffic class
[dpdk.git] / app / test-pmd / testpmd.c
index 43329ed..2e302bb 100644 (file)
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,6 @@
 #include <rte_memcpy.h>
 #include <rte_memzone.h>
 #include <rte_launch.h>
-#include <rte_tailq.h>
 #include <rte_eal.h>
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
@@ -183,9 +182,6 @@ uint8_t dcb_config = 0;
 /* Whether the dcb is in testing status */
 uint8_t dcb_test = 0;
 
-/* DCB on and VT on mapping is default */
-enum dcb_queue_mapping_mode dcb_q_mapping = DCB_VT_Q_MAPPING;
-
 /*
  * Configurable number of RX/TX queues.
  */
@@ -299,6 +295,9 @@ struct rte_fdir_conf fdir_conf = {
                },
                .src_port_mask = 0xFFFF,
                .dst_port_mask = 0xFFFF,
+               .mac_addr_byte_mask = 0xFF,
+               .tunnel_type_mask = 1,
+               .tunnel_id_mask = 0xFFFFFFFF,
        },
        .drop_queue = 127,
 };
@@ -314,6 +313,8 @@ struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_a
 uint16_t nb_tx_queue_stats_mappings = 0;
 uint16_t nb_rx_queue_stats_mappings = 0;
 
+unsigned max_socket = 0;
+
 /* Forward function declarations */
 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
 static void check_all_ports_link_status(uint32_t port_mask);
@@ -333,7 +334,7 @@ find_next_port(portid_t p, struct rte_port *ports, int size)
        if (ports == NULL)
                rte_exit(-EINVAL, "failed to find a next port id\n");
 
-       while ((ports[p].enabled == 0) && (p < size))
+       while ((p < size) && (ports[p].enabled == 0))
                p++;
        return p;
 }
@@ -346,6 +347,7 @@ set_default_fwd_lcores_config(void)
 {
        unsigned int i;
        unsigned int nb_lc;
+       unsigned int sock_num;
 
        nb_lc = 0;
        for (i = 0; i < RTE_MAX_LCORE; i++) {
@@ -354,6 +356,12 @@ set_default_fwd_lcores_config(void)
                if (i == rte_get_master_lcore())
                        continue;
                fwd_lcores_cpuids[nb_lc++] = i;
+               sock_num = rte_lcore_to_socket_id(i) + 1;
+               if (sock_num > max_socket) {
+                       if (sock_num > RTE_MAX_NUMA_NODES)
+                               rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
+                       max_socket = sock_num;
+               }
        }
        nb_lcores = (lcoreid_t) nb_lc;
        nb_cfg_lcores = nb_lcores;
@@ -394,83 +402,24 @@ set_def_fwd_config(void)
 /*
  * Configuration initialisation done once at init time.
  */
-struct mbuf_ctor_arg {
-       uint16_t seg_buf_offset; /**< offset of data in data segment of mbuf. */
-       uint16_t seg_buf_size;   /**< size of data segment in mbuf. */
-};
-
-struct mbuf_pool_ctor_arg {
-       uint16_t seg_buf_size; /**< size of data segment in mbuf. */
-};
-
-static void
-testpmd_mbuf_ctor(struct rte_mempool *mp,
-                 void *opaque_arg,
-                 void *raw_mbuf,
-                 __attribute__((unused)) unsigned i)
-{
-       struct mbuf_ctor_arg *mb_ctor_arg;
-       struct rte_mbuf    *mb;
-
-       mb_ctor_arg = (struct mbuf_ctor_arg *) opaque_arg;
-       mb = (struct rte_mbuf *) raw_mbuf;
-
-       mb->pool         = mp;
-       mb->buf_addr     = (void *) ((char *)mb + mb_ctor_arg->seg_buf_offset);
-       mb->buf_physaddr = (uint64_t) (rte_mempool_virt2phy(mp, mb) +
-                       mb_ctor_arg->seg_buf_offset);
-       mb->buf_len      = mb_ctor_arg->seg_buf_size;
-       mb->ol_flags     = 0;
-       mb->data_off     = RTE_PKTMBUF_HEADROOM;
-       mb->nb_segs      = 1;
-       mb->tx_offload   = 0;
-       mb->vlan_tci     = 0;
-       mb->hash.rss     = 0;
-}
-
-static void
-testpmd_mbuf_pool_ctor(struct rte_mempool *mp,
-                      void *opaque_arg)
-{
-       struct mbuf_pool_ctor_arg      *mbp_ctor_arg;
-       struct rte_pktmbuf_pool_private *mbp_priv;
-
-       if (mp->private_data_size < sizeof(struct rte_pktmbuf_pool_private)) {
-               printf("%s(%s) private_data_size %d < %d\n",
-                      __func__, mp->name, (int) mp->private_data_size,
-                      (int) sizeof(struct rte_pktmbuf_pool_private));
-               return;
-       }
-       mbp_ctor_arg = (struct mbuf_pool_ctor_arg *) opaque_arg;
-       mbp_priv = rte_mempool_get_priv(mp);
-       mbp_priv->mbuf_data_room_size = mbp_ctor_arg->seg_buf_size;
-}
-
 static void
 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                 unsigned int socket_id)
 {
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp;
-       struct mbuf_pool_ctor_arg mbp_ctor_arg;
-       struct mbuf_ctor_arg mb_ctor_arg;
        uint32_t mb_size;
 
-       mbp_ctor_arg.seg_buf_size = (uint16_t) (RTE_PKTMBUF_HEADROOM +
-                                               mbuf_seg_size);
-       mb_ctor_arg.seg_buf_offset =
-               (uint16_t) RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_mbuf));
-       mb_ctor_arg.seg_buf_size = mbp_ctor_arg.seg_buf_size;
-       mb_size = mb_ctor_arg.seg_buf_offset + mb_ctor_arg.seg_buf_size;
+       mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
        mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
 
 #ifdef RTE_LIBRTE_PMD_XENVIRT
        rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
-                                   (unsigned) mb_mempool_cache,
-                                   sizeof(struct rte_pktmbuf_pool_private),
-                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
-                                   testpmd_mbuf_ctor, &mb_ctor_arg,
-                                   socket_id, 0);
+               (unsigned) mb_mempool_cache,
+               sizeof(struct rte_pktmbuf_pool_private),
+               rte_pktmbuf_pool_init, NULL,
+               rte_pktmbuf_init, NULL,
+               socket_id, 0);
 
 
 
@@ -479,16 +428,13 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
                rte_mp = mempool_anon_create(pool_name, nb_mbuf, mb_size,
                                    (unsigned) mb_mempool_cache,
                                    sizeof(struct rte_pktmbuf_pool_private),
-                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
-                                   testpmd_mbuf_ctor, &mb_ctor_arg,
+                                   rte_pktmbuf_pool_init, NULL,
+                                   rte_pktmbuf_init, NULL,
                                    socket_id, 0);
        else
-               rte_mp = rte_mempool_create(pool_name, nb_mbuf, mb_size,
-                                   (unsigned) mb_mempool_cache,
-                                   sizeof(struct rte_pktmbuf_pool_private),
-                                   testpmd_mbuf_pool_ctor, &mbp_ctor_arg,
-                                   testpmd_mbuf_ctor, &mb_ctor_arg,
-                                   socket_id, 0);
+               /* wrapper to rte_mempool_create() */
+               rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+                       mb_mempool_cache, 0, mbuf_seg_size, socket_id);
 
 #endif
 
@@ -509,7 +455,7 @@ check_socket_id(const unsigned int socket_id)
 {
        static int warning_once = 0;
 
-       if (socket_id >= MAX_SOCKET) {
+       if (socket_id >= max_socket) {
                if (!warning_once && numa_support)
                        printf("Warning: NUMA should be configured manually by"
                               " using --port-numa-config and"
@@ -529,9 +475,9 @@ init_config(void)
        struct rte_mempool *mbp;
        unsigned int nb_mbuf_per_pool;
        lcoreid_t  lc_id;
-       uint8_t port_per_socket[MAX_SOCKET];
+       uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
 
-       memset(port_per_socket,0,MAX_SOCKET);
+       memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
        /* Configuration of logical cores. */
        fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
                                sizeof(struct fwd_lcore *) * nb_lcores,
@@ -579,20 +525,6 @@ init_config(void)
                                                 socket_num);
        }
 
-       /* Configuration of Ethernet ports. */
-       ports = rte_zmalloc("testpmd: ports",
-                           sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
-                           RTE_CACHE_LINE_SIZE);
-       if (ports == NULL) {
-               rte_exit(EXIT_FAILURE,
-                               "rte_zmalloc(%d struct rte_port) failed\n",
-                               RTE_MAX_ETHPORTS);
-       }
-
-       /* enabled allocated ports */
-       for (pid = 0; pid < nb_ports; pid++)
-               ports[pid].enabled = 1;
-
        FOREACH_PORT(pid, ports) {
                port = &ports[pid];
                rte_eth_dev_info_get(pid, &port->dev_info);
@@ -622,7 +554,7 @@ init_config(void)
                if (param_total_num_mbufs)
                        nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
 
-               for (i = 0; i < MAX_SOCKET; i++) {
+               for (i = 0; i < max_socket; i++) {
                        nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
                        if (nb_mbuf)
                                mbuf_pool_create(mbuf_data_size,
@@ -1279,7 +1211,8 @@ all_ports_started(void)
        FOREACH_PORT(pi, ports) {
                port = &ports[pi];
                /* Check if there is a port which is not started */
-               if (port->port_status != RTE_PORT_STARTED)
+               if ((port->port_status != RTE_PORT_STARTED) &&
+                       (port->slave_flag == 0))
                        return 0;
        }
 
@@ -1295,7 +1228,8 @@ all_ports_stopped(void)
 
        FOREACH_PORT(pi, ports) {
                port = &ports[pi];
-               if (port->port_status != RTE_PORT_STOPPED)
+               if ((port->port_status != RTE_PORT_STOPPED) &&
+                       (port->slave_flag == 0))
                        return 0;
        }
 
@@ -1329,7 +1263,7 @@ port_is_closed(portid_t port_id)
 int
 start_port(portid_t pid)
 {
-       int diag, need_check_link_status = 0;
+       int diag, need_check_link_status = -1;
        portid_t pi;
        queueid_t qi;
        struct rte_port *port;
@@ -1340,6 +1274,9 @@ start_port(portid_t pid)
                return -1;
        }
 
+       if (port_id_is_invalid(pid, ENABLED_WARN))
+               return 0;
+
        if (init_fwd_streams() < 0) {
                printf("Fail from init_fwd_streams()\n");
                return -1;
@@ -1351,6 +1288,7 @@ start_port(portid_t pid)
                if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
                        continue;
 
+               need_check_link_status = 0;
                port = &ports[pi];
                if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
                                                 RTE_PORT_HANDLING) == 0) {
@@ -1471,9 +1409,9 @@ start_port(portid_t pid)
                need_check_link_status = 1;
        }
 
-       if (need_check_link_status && !no_link_check)
+       if (need_check_link_status == 1 && !no_link_check)
                check_all_ports_link_status(RTE_PORT_ALL);
-       else
+       else if (need_check_link_status == 0)
                printf("Please stop the ports first\n");
 
        printf("Done\n");
@@ -1495,10 +1433,14 @@ stop_port(portid_t pid)
                dcb_test = 0;
                dcb_config = 0;
        }
+
+       if (port_id_is_invalid(pid, ENABLED_WARN))
+               return;
+
        printf("Stopping ports...\n");
 
        FOREACH_PORT(pi, ports) {
-               if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
+               if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
                        continue;
 
                port = &ports[pi];
@@ -1530,13 +1472,22 @@ close_port(portid_t pid)
                return;
        }
 
+       if (port_id_is_invalid(pid, ENABLED_WARN))
+               return;
+
        printf("Closing ports...\n");
 
        FOREACH_PORT(pi, ports) {
-               if (!port_id_is_invalid(pid, DISABLED_WARN) && pid != pi)
+               if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
                        continue;
 
                port = &ports[pi];
+               if (rte_atomic16_cmpset(&(port->port_status),
+                       RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
+                       printf("Port %d is already closed\n", pi);
+                       continue;
+               }
+
                if (rte_atomic16_cmpset(&(port->port_status),
                        RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
                        printf("Port %d is now not stopped\n", pi);
@@ -1608,8 +1559,6 @@ detach_port(uint8_t port_id)
                return;
        }
 
-       rte_eth_promiscuous_disable(port_id);
-
        if (rte_eth_dev_detach(port_id, name))
                return;
 
@@ -1637,6 +1586,9 @@ pmd_test_exit(void)
 {
        portid_t pt_id;
 
+       if (test_done == 0)
+               stop_packet_forwarding();
+
        FOREACH_PORT(pt_id, ports) {
                printf("Stopping port %d...", pt_id);
                fflush(stdout);
@@ -1873,6 +1825,22 @@ init_port_config(void)
        }
 }
 
+void set_port_slave_flag(portid_t slave_pid)
+{
+       struct rte_port *port;
+
+       port = &ports[slave_pid];
+       port->slave_flag = 1;
+}
+
+void clear_port_slave_flag(portid_t slave_pid)
+{
+       struct rte_port *port;
+
+       port = &ports[slave_pid];
+       port->slave_flag = 0;
+}
+
 const uint16_t vlan_tags[] = {
                0,  1,  2,  3,  4,  5,  6,  7,
                8,  9, 10, 11,  12, 13, 14, 15,
@@ -1881,115 +1849,131 @@ const uint16_t vlan_tags[] = {
 };
 
 static  int
-get_eth_dcb_conf(struct rte_eth_conf *eth_conf, struct dcb_config *dcb_conf)
+get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
+                enum dcb_mode_enable dcb_mode,
+                enum rte_eth_nb_tcs num_tcs,
+                uint8_t pfc_en)
 {
-        uint8_t i;
+       uint8_t i;
 
-       /*
-        * Builds up the correct configuration for dcb+vt based on the vlan tags array
-        * given above, and the number of traffic classes available for use.
-        */
-       if (dcb_conf->dcb_mode == DCB_VT_ENABLED) {
-               struct rte_eth_vmdq_dcb_conf vmdq_rx_conf;
-               struct rte_eth_vmdq_dcb_tx_conf vmdq_tx_conf;
+       /*
+        * Builds up the correct configuration for dcb+vt based on the vlan tags array
+        * given above, and the number of traffic classes available for use.
+        */
+       if (dcb_mode == DCB_VT_ENABLED) {
+               struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
+                               &eth_conf->rx_adv_conf.vmdq_dcb_conf;
+               struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
+                               &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
 
                /* VMDQ+DCB RX and TX configrations */
-               vmdq_rx_conf.enable_default_pool = 0;
-               vmdq_rx_conf.default_pool = 0;
-               vmdq_rx_conf.nb_queue_pools =
-                       (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-               vmdq_tx_conf.nb_queue_pools =
-                       (dcb_conf->num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
-
-               vmdq_rx_conf.nb_pool_maps = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
-               for (i = 0; i < vmdq_rx_conf.nb_pool_maps; i++) {
-                       vmdq_rx_conf.pool_map[i].vlan_id = vlan_tags[ i ];
-                       vmdq_rx_conf.pool_map[i].pools = 1 << (i % vmdq_rx_conf.nb_queue_pools);
+               vmdq_rx_conf->enable_default_pool = 0;
+               vmdq_rx_conf->default_pool = 0;
+               vmdq_rx_conf->nb_queue_pools =
+                       (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+               vmdq_tx_conf->nb_queue_pools =
+                       (num_tcs ==  ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
+
+               vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
+               for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
+                       vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
+                       vmdq_rx_conf->pool_map[i].pools =
+                               1 << (i % vmdq_rx_conf->nb_queue_pools);
                }
                for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-                       vmdq_rx_conf.dcb_queue[i] = i;
-                       vmdq_tx_conf.dcb_queue[i] = i;
+                       vmdq_rx_conf->dcb_tc[i] = i;
+                       vmdq_tx_conf->dcb_tc[i] = i;
                }
 
-               /*set DCB mode of RX and TX of multiple queues*/
+               /* set DCB mode of RX and TX of multiple queues */
                eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
-               if (dcb_conf->pfc_en)
-                       eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-               else
-                       eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-               (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_dcb_conf, &vmdq_rx_conf,
-                                sizeof(struct rte_eth_vmdq_dcb_conf)));
-               (void)(rte_memcpy(&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf, &vmdq_tx_conf,
-                                sizeof(struct rte_eth_vmdq_dcb_tx_conf)));
-       }
-       else {
-               struct rte_eth_dcb_rx_conf rx_conf;
-               struct rte_eth_dcb_tx_conf tx_conf;
-
-               /* queue mapping configuration of DCB RX and TX */
-               if (dcb_conf->num_tcs == ETH_4_TCS)
-                       dcb_q_mapping = DCB_4_TCS_Q_MAPPING;
-               else
-                       dcb_q_mapping = DCB_8_TCS_Q_MAPPING;
-
-               rx_conf.nb_tcs = dcb_conf->num_tcs;
-               tx_conf.nb_tcs = dcb_conf->num_tcs;
-
-               for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++){
-                       rx_conf.dcb_queue[i] = i;
-                       tx_conf.dcb_queue[i] = i;
+       } else {
+               struct rte_eth_dcb_rx_conf *rx_conf =
+                               &eth_conf->rx_adv_conf.dcb_rx_conf;
+               struct rte_eth_dcb_tx_conf *tx_conf =
+                               &eth_conf->tx_adv_conf.dcb_tx_conf;
+
+               rx_conf->nb_tcs = num_tcs;
+               tx_conf->nb_tcs = num_tcs;
+
+               for (i = 0; i < num_tcs; i++) {
+                       rx_conf->dcb_tc[i] = i;
+                       tx_conf->dcb_tc[i] = i;
                }
-               eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB;
+               eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
+               eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
                eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
-               if (dcb_conf->pfc_en)
-                       eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT|ETH_DCB_PFC_SUPPORT;
-               else
-                       eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
-
-               (void)(rte_memcpy(&eth_conf->rx_adv_conf.dcb_rx_conf, &rx_conf,
-                                sizeof(struct rte_eth_dcb_rx_conf)));
-               (void)(rte_memcpy(&eth_conf->tx_adv_conf.dcb_tx_conf, &tx_conf,
-                                sizeof(struct rte_eth_dcb_tx_conf)));
        }
 
+       if (pfc_en)
+               eth_conf->dcb_capability_en =
+                               ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
+       else
+               eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
+
        return 0;
 }
 
 int
-init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
+init_port_dcb_config(portid_t pid,
+                    enum dcb_mode_enable dcb_mode,
+                    enum rte_eth_nb_tcs num_tcs,
+                    uint8_t pfc_en)
 {
        struct rte_eth_conf port_conf;
+       struct rte_eth_dev_info dev_info;
        struct rte_port *rte_port;
        int retval;
-       uint16_t nb_vlan;
        uint16_t i;
 
-       /* rxq and txq configuration in dcb mode */
-       nb_rxq = 128;
-       nb_txq = 128;
+       rte_eth_dev_info_get(pid, &dev_info);
+
+       /* If dev_info.vmdq_pool_base is greater than 0,
+        * the queue ids of the VMDQ pools start after the PF queues.
+        */
+       if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
+               printf("VMDQ_DCB multi-queue mode is nonsensical"
+                       " for port %d.", pid);
+               return -1;
+       }
+
+       /* Assume that all ports in testpmd have the same DCB capability
+        * and the same number of rxq and txq in DCB mode.
+        */
+       if (dcb_mode == DCB_VT_ENABLED) {
+               nb_rxq = dev_info.max_rx_queues;
+               nb_txq = dev_info.max_tx_queues;
+       } else {
+               /* if VT is disabled, use all PF queues */
+               if (dev_info.vmdq_pool_base == 0) {
+                       nb_rxq = dev_info.max_rx_queues;
+                       nb_txq = dev_info.max_tx_queues;
+               } else {
+                       nb_rxq = (queueid_t)num_tcs;
+                       nb_txq = (queueid_t)num_tcs;
+
+               }
+       }
        rx_free_thresh = 64;
 
-       memset(&port_conf,0,sizeof(struct rte_eth_conf));
+       memset(&port_conf, 0, sizeof(struct rte_eth_conf));
        /* Enter DCB configuration status */
        dcb_config = 1;
 
-       nb_vlan = sizeof( vlan_tags )/sizeof( vlan_tags[ 0 ]);
        /*set configuration of DCB in vt mode and DCB in non-vt mode*/
-       retval = get_eth_dcb_conf(&port_conf, dcb_conf);
+       retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
        if (retval < 0)
                return retval;
 
        rte_port = &ports[pid];
-       memcpy(&rte_port->dev_conf, &port_conf,sizeof(struct rte_eth_conf));
+       memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
 
        rxtx_port_config(rte_port);
        /* VLAN filter */
        rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
-       for (i = 0; i < nb_vlan; i++){
+       for (i = 0; i < RTE_DIM(vlan_tags); i++)
                rx_vft_set(pid, vlan_tags[i], 1);
-       }
 
        rte_eth_macaddr_get(pid, &rte_port->eth_addr);
        map_port_queue_stats_mapping_registers(pid, rte_port);
@@ -1999,6 +1983,26 @@ init_port_dcb_config(portid_t pid,struct dcb_config *dcb_conf)
        return 0;
 }
 
+static void
+init_port(void)
+{
+       portid_t pid;
+
+       /* Configuration of Ethernet ports. */
+       ports = rte_zmalloc("testpmd: ports",
+                           sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
+                           RTE_CACHE_LINE_SIZE);
+       if (ports == NULL) {
+               rte_exit(EXIT_FAILURE,
+                               "rte_zmalloc(%d struct rte_port) failed\n",
+                               RTE_MAX_ETHPORTS);
+       }
+
+       /* enabled allocated ports */
+       for (pid = 0; pid < nb_ports; pid++)
+               ports[pid].enabled = 1;
+}
+
 int
 main(int argc, char** argv)
 {
@@ -2013,6 +2017,9 @@ main(int argc, char** argv)
        if (nb_ports == 0)
                RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
 
+       /* allocate port structures, and init them */
+       init_port();
+
        set_def_fwd_config();
        if (nb_lcores == 0)
                rte_panic("Empty set of forwarding logical cores - check the "