app/testpmd: add extended Rx queue setup
[dpdk.git] / app / test-pmd / testpmd.c
index ccba71c..00cffae 100644 (file)
@@ -186,7 +186,7 @@ struct fwd_engine * fwd_engines[] = {
        NULL,
 };
 
-struct rte_mempool *mempools[RTE_MAX_NUMA_NODES];
+struct rte_mempool *mempools[RTE_MAX_NUMA_NODES * MAX_SEGS_BUFFER_SPLIT];
 uint16_t mempool_flags;
 
 struct fwd_config cur_fwd_config;
@@ -195,7 +195,10 @@ uint32_t retry_enabled;
 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
 
-uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
+uint32_t mbuf_data_size_n = 1; /* Number of specified mbuf sizes. */
+uint16_t mbuf_data_size[MAX_SEGS_BUFFER_SPLIT] = {
+       DEFAULT_MBUF_DATA_SIZE
+}; /**< Mbuf data space size. */
 uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                       * specified on command-line. */
 uint16_t stats_period; /**< Period to show statistics (disabled by default) */
@@ -206,6 +209,15 @@ uint16_t stats_period; /**< Period to show statistics (disabled by default) */
  */
 uint8_t f_quit;
 
+/*
+ * Configuration of packet segments used to scatter received packets
+ * if some of split features is configured.
+ */
+uint16_t rx_pkt_seg_lengths[MAX_SEGS_BUFFER_SPLIT];
+uint8_t  rx_pkt_nb_segs; /**< Number of segments to split */
+uint16_t rx_pkt_seg_offsets[MAX_SEGS_BUFFER_SPLIT];
+uint8_t  rx_pkt_nb_offs; /**< Number of specified offsets */
+
 /*
  * Configuration of packet segments used by the "txonly" processing engine.
  */
@@ -367,6 +379,9 @@ bool setup_on_probe_event = true;
 /* Clear ptypes on port initialization. */
 uint8_t clear_ptypes = true;
 
+/* Hairpin ports configuration mode. */
+uint16_t hairpin_mode;
+
 /* Pretty printing of ethdev events */
 static const char * const eth_event_desc[] = {
        [RTE_ETH_EVENT_UNKNOWN] = "unknown",
@@ -955,14 +970,14 @@ setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
  */
 static struct rte_mempool *
 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
-                unsigned int socket_id)
+                unsigned int socket_id, uint16_t size_idx)
 {
        char pool_name[RTE_MEMPOOL_NAMESIZE];
        struct rte_mempool *rte_mp = NULL;
        uint32_t mb_size;
 
        mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
-       mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
+       mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
 
        TESTPMD_LOG(INFO,
                "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
@@ -1485,8 +1500,8 @@ init_config(void)
                                port->dev_info.rx_desc_lim.nb_mtu_seg_max;
 
                        if ((data_size + RTE_PKTMBUF_HEADROOM) >
-                                                       mbuf_data_size) {
-                               mbuf_data_size = data_size +
+                                                       mbuf_data_size[0]) {
+                               mbuf_data_size[0] = data_size +
                                                 RTE_PKTMBUF_HEADROOM;
                                warning = 1;
                        }
@@ -1494,9 +1509,9 @@ init_config(void)
        }
 
        if (warning)
-               TESTPMD_LOG(WARNING, "Configured mbuf size %hu\n",
-                           mbuf_data_size);
-
+               TESTPMD_LOG(WARNING,
+                           "Configured mbuf size of the first segment %hu\n",
+                           mbuf_data_size[0]);
        /*
         * Create pools of mbuf.
         * If NUMA support is disabled, create a single pool of mbuf in
@@ -1516,21 +1531,23 @@ init_config(void)
        }
 
        if (numa_support) {
-               uint8_t i;
+               uint8_t i, j;
 
                for (i = 0; i < num_sockets; i++)
-                       mempools[i] = mbuf_pool_create(mbuf_data_size,
-                                                      nb_mbuf_per_pool,
-                                                      socket_ids[i]);
+                       for (j = 0; j < mbuf_data_size_n; j++)
+                               mempools[i * MAX_SEGS_BUFFER_SPLIT + j] =
+                                       mbuf_pool_create(mbuf_data_size[j],
+                                                         nb_mbuf_per_pool,
+                                                         socket_ids[i], j);
        } else {
-               if (socket_num == UMA_NO_CONFIG)
-                       mempools[0] = mbuf_pool_create(mbuf_data_size,
-                                                      nb_mbuf_per_pool, 0);
-               else
-                       mempools[socket_num] = mbuf_pool_create
-                                                       (mbuf_data_size,
-                                                        nb_mbuf_per_pool,
-                                                        socket_num);
+               uint8_t i;
+
+               for (i = 0; i < mbuf_data_size_n; i++)
+                       mempools[i] = mbuf_pool_create
+                                       (mbuf_data_size[i],
+                                        nb_mbuf_per_pool,
+                                        socket_num == UMA_NO_CONFIG ?
+                                        0 : socket_num, i);
        }
 
        init_port_config();
@@ -1542,10 +1559,10 @@ init_config(void)
         */
        for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
                mbp = mbuf_pool_find(
-                       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
+                       rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]), 0);
 
                if (mbp == NULL)
-                       mbp = mbuf_pool_find(0);
+                       mbp = mbuf_pool_find(0, 0);
                fwd_lcores[lc_id]->mbp = mbp;
                /* initialize GSO context */
                fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
@@ -2345,7 +2362,7 @@ port_is_started(portid_t port_id)
 
 /* Configure the Rx and Tx hairpin queues for the selected port. */
 static int
-setup_hairpin_queues(portid_t pi)
+setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
 {
        queueid_t qi;
        struct rte_eth_hairpin_conf hairpin_conf = {
@@ -2354,10 +2371,49 @@ setup_hairpin_queues(portid_t pi)
        int i;
        int diag;
        struct rte_port *port = &ports[pi];
+       uint16_t peer_rx_port = pi;
+       uint16_t peer_tx_port = pi;
+       uint32_t manual = 1;
+       uint32_t tx_exp = hairpin_mode & 0x10;
+
+       if (!(hairpin_mode & 0xf)) {
+               peer_rx_port = pi;
+               peer_tx_port = pi;
+               manual = 0;
+       } else if (hairpin_mode & 0x1) {
+               peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
+                                                      RTE_ETH_DEV_NO_OWNER);
+               if (peer_tx_port >= RTE_MAX_ETHPORTS)
+                       peer_tx_port = rte_eth_find_next_owned_by(0,
+                                               RTE_ETH_DEV_NO_OWNER);
+               if (p_pi != RTE_MAX_ETHPORTS) {
+                       peer_rx_port = p_pi;
+               } else {
+                       uint16_t next_pi;
+
+                       /* Last port will be the peer RX port of the first. */
+                       RTE_ETH_FOREACH_DEV(next_pi)
+                               peer_rx_port = next_pi;
+               }
+               manual = 1;
+       } else if (hairpin_mode & 0x2) {
+               if (cnt_pi & 0x1) {
+                       peer_rx_port = p_pi;
+               } else {
+                       peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
+                                               RTE_ETH_DEV_NO_OWNER);
+                       if (peer_rx_port >= RTE_MAX_ETHPORTS)
+                               peer_rx_port = pi;
+               }
+               peer_tx_port = peer_rx_port;
+               manual = 1;
+       }
 
        for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
-               hairpin_conf.peers[0].port = pi;
+               hairpin_conf.peers[0].port = peer_rx_port;
                hairpin_conf.peers[0].queue = i + nb_rxq;
+               hairpin_conf.manual_bind = !!manual;
+               hairpin_conf.tx_explicit = !!tx_exp;
                diag = rte_eth_tx_hairpin_queue_setup
                        (pi, qi, nb_txd, &hairpin_conf);
                i++;
@@ -2377,8 +2433,10 @@ setup_hairpin_queues(portid_t pi)
                return -1;
        }
        for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
-               hairpin_conf.peers[0].port = pi;
+               hairpin_conf.peers[0].port = peer_tx_port;
                hairpin_conf.peers[0].queue = i + nb_txq;
+               hairpin_conf.manual_bind = !!manual;
+               hairpin_conf.tx_explicit = !!tx_exp;
                diag = rte_eth_rx_hairpin_queue_setup
                        (pi, qi, nb_rxd, &hairpin_conf);
                i++;
@@ -2400,11 +2458,62 @@ setup_hairpin_queues(portid_t pi)
        return 0;
 }
 
+/* Configure the Rx with optional split. */
+int
+rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+              uint16_t nb_rx_desc, unsigned int socket_id,
+              struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
+{
+       union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+       unsigned int i, mp_n;
+       int ret;
+
+       if (rx_pkt_nb_segs <= 1 ||
+           (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+               rx_conf->rx_seg = NULL;
+               rx_conf->rx_nseg = 0;
+               ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
+                                            nb_rx_desc, socket_id,
+                                            rx_conf, mp);
+               return ret;
+       }
+       for (i = 0; i < rx_pkt_nb_segs; i++) {
+               struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+               struct rte_mempool *mpx;
+               /*
+                * Use last valid pool for the segments with number
+                * exceeding the pool index.
+                */
+               mp_n = (i >= mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+               mpx = mbuf_pool_find(socket_id, mp_n);
+               /* Handle zero as mbuf data buffer size. */
+               rx_seg->length = rx_pkt_seg_lengths[i] ?
+                                  rx_pkt_seg_lengths[i] :
+                                  mbuf_data_size[mp_n];
+               rx_seg->offset = i < rx_pkt_nb_offs ?
+                                  rx_pkt_seg_offsets[i] : 0;
+               rx_seg->mp = mpx ? mpx : mp;
+       }
+       rx_conf->rx_nseg = rx_pkt_nb_segs;
+       rx_conf->rx_seg = rx_useg;
+       ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+                                   socket_id, rx_conf, NULL);
+       rx_conf->rx_seg = NULL;
+       rx_conf->rx_nseg = 0;
+       return ret;
+}
+
 int
 start_port(portid_t pid)
 {
        int diag, need_check_link_status = -1;
        portid_t pi;
+       portid_t p_pi = RTE_MAX_ETHPORTS;
+       portid_t pl[RTE_MAX_ETHPORTS];
+       portid_t peer_pl[RTE_MAX_ETHPORTS];
+       uint16_t cnt_pi = 0;
+       uint16_t cfg_pi = 0;
+       int peer_pi;
        queueid_t qi;
        struct rte_port *port;
        struct rte_ether_addr mac_addr;
@@ -2498,7 +2607,8 @@ start_port(portid_t pid)
                                if ((numa_support) &&
                                        (rxring_numa[pi] != NUMA_NO_CONFIG)) {
                                        struct rte_mempool * mp =
-                                               mbuf_pool_find(rxring_numa[pi]);
+                                               mbuf_pool_find
+                                                       (rxring_numa[pi], 0);
                                        if (mp == NULL) {
                                                printf("Failed to setup RX queue:"
                                                        "No mempool allocation"
@@ -2507,14 +2617,15 @@ start_port(portid_t pid)
                                                return -1;
                                        }
 
-                                       diag = rte_eth_rx_queue_setup(pi, qi,
+                                       diag = rx_queue_setup(pi, qi,
                                             port->nb_rx_desc[qi],
                                             rxring_numa[pi],
                                             &(port->rx_conf[qi]),
                                             mp);
                                } else {
                                        struct rte_mempool *mp =
-                                               mbuf_pool_find(port->socket_id);
+                                               mbuf_pool_find
+                                                       (port->socket_id, 0);
                                        if (mp == NULL) {
                                                printf("Failed to setup RX queue:"
                                                        "No mempool allocation"
@@ -2522,7 +2633,7 @@ start_port(portid_t pid)
                                                        port->socket_id);
                                                return -1;
                                        }
-                                       diag = rte_eth_rx_queue_setup(pi, qi,
+                                       diag = rx_queue_setup(pi, qi,
                                             port->nb_rx_desc[qi],
                                             port->socket_id,
                                             &(port->rx_conf[qi]),
@@ -2544,7 +2655,7 @@ start_port(portid_t pid)
                                return -1;
                        }
                        /* setup hairpin queues */
-                       if (setup_hairpin_queues(pi) != 0)
+                       if (setup_hairpin_queues(pi, p_pi, cnt_pi) != 0)
                                return -1;
                }
                configure_rxtx_dump_callbacks(verbose_level);
@@ -2557,6 +2668,9 @@ start_port(portid_t pid)
                                pi);
                }
 
+               p_pi = pi;
+               cnt_pi++;
+
                /* start port */
                if (rte_eth_dev_start(pi) < 0) {
                        printf("Fail to start port %d\n", pi);
@@ -2581,6 +2695,8 @@ start_port(portid_t pid)
 
                /* at least one port started, need checking link status */
                need_check_link_status = 1;
+
+               pl[cfg_pi++] = pi;
        }
 
        if (need_check_link_status == 1 && !no_link_check)
@@ -2588,6 +2704,50 @@ start_port(portid_t pid)
        else if (need_check_link_status == 0)
                printf("Please stop the ports first\n");
 
+       if (hairpin_mode & 0xf) {
+               uint16_t i;
+               int j;
+
+               /* bind all started hairpin ports */
+               for (i = 0; i < cfg_pi; i++) {
+                       pi = pl[i];
+                       /* bind current Tx to all peer Rx */
+                       peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+                                                       RTE_MAX_ETHPORTS, 1);
+                       if (peer_pi < 0)
+                               return peer_pi;
+                       for (j = 0; j < peer_pi; j++) {
+                               if (!port_is_started(peer_pl[j]))
+                                       continue;
+                               diag = rte_eth_hairpin_bind(pi, peer_pl[j]);
+                               if (diag < 0) {
+                                       printf("Error during binding hairpin"
+                                              " Tx port %u to %u: %s\n",
+                                              pi, peer_pl[j],
+                                              rte_strerror(-diag));
+                                       return -1;
+                               }
+                       }
+                       /* bind all peer Tx to current Rx */
+                       peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+                                                       RTE_MAX_ETHPORTS, 0);
+                       if (peer_pi < 0)
+                               return peer_pi;
+                       for (j = 0; j < peer_pi; j++) {
+                               if (!port_is_started(peer_pl[j]))
+                                       continue;
+                               diag = rte_eth_hairpin_bind(peer_pl[j], pi);
+                               if (diag < 0) {
+                                       printf("Error during binding hairpin"
+                                              " Tx port %u to %u: %s\n",
+                                              peer_pl[j], pi,
+                                              rte_strerror(-diag));
+                                       return -1;
+                               }
+                       }
+               }
+       }
+
        printf("Done\n");
        return 0;
 }
@@ -2598,6 +2758,8 @@ stop_port(portid_t pid)
        portid_t pi;
        struct rte_port *port;
        int need_check_link_status = 0;
+       portid_t peer_pl[RTE_MAX_ETHPORTS];
+       int peer_pi;
 
        if (dcb_test) {
                dcb_test = 0;
@@ -2628,6 +2790,22 @@ stop_port(portid_t pid)
                                                RTE_PORT_HANDLING) == 0)
                        continue;
 
+               if (hairpin_mode & 0xf) {
+                       int j;
+
+                       rte_eth_hairpin_unbind(pi, RTE_MAX_ETHPORTS);
+                       /* unbind all peer Tx from current Rx */
+                       peer_pi = rte_eth_hairpin_get_peer_ports(pi, peer_pl,
+                                                       RTE_MAX_ETHPORTS, 0);
+                       if (peer_pi < 0)
+                               continue;
+                       for (j = 0; j < peer_pi; j++) {
+                               if (!port_is_started(peer_pl[j]))
+                                       continue;
+                               rte_eth_hairpin_unbind(peer_pl[j], pi);
+                       }
+               }
+
                rte_eth_dev_stop(pi);
 
                if (rte_atomic16_cmpset(&(port->port_status),
@@ -2909,13 +3087,13 @@ void
 pmd_test_exit(void)
 {
        portid_t pt_id;
+       unsigned int i;
        int ret;
-       int i;
 
        if (test_done == 0)
                stop_packet_forwarding();
 
-       for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+       for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
                if (mempools[i]) {
                        if (mp_alloc_type == MP_ALLOC_ANON)
                                rte_mempool_mem_iter(mempools[i], dma_unmap_cb,
@@ -2959,7 +3137,7 @@ pmd_test_exit(void)
                        return;
                }
        }
-       for (i = 0 ; i < RTE_MAX_NUMA_NODES ; i++) {
+       for (i = 0 ; i < RTE_DIM(mempools) ; i++) {
                if (mempools[i])
                        rte_mempool_free(mempools[i]);
        }
@@ -3567,6 +3745,8 @@ init_port_dcb_config(portid_t pid,
 static void
 init_port(void)
 {
+       int i;
+
        /* Configuration of Ethernet ports. */
        ports = rte_zmalloc("testpmd: ports",
                            sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
@@ -3576,7 +3756,8 @@ init_port(void)
                                "rte_zmalloc(%d struct rte_port) failed\n",
                                RTE_MAX_ETHPORTS);
        }
-
+       for (i = 0; i < RTE_MAX_ETHPORTS; i++)
+               LIST_INIT(&ports[i].flow_tunnel_list);
        /* Initialize ports NUMA structures */
        memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
        memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);