+ /* No port is not started */
+ return 1;
+}
+
+int
+port_is_stopped(portid_t port_id)
+{
+ struct rte_port *port = &ports[port_id];
+
+ if ((port->port_status != RTE_PORT_STOPPED) &&
+ (port->slave_flag == 0))
+ return 0;
+ return 1;
+}
+
+int
+all_ports_stopped(void)
+{
+ portid_t pi;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (!port_is_stopped(pi))
+ return 0;
+ }
+
+ return 1;
+}
+
+int
+port_is_started(portid_t port_id)
+{
+ if (port_id_is_invalid(port_id, ENABLED_WARN))
+ return 0;
+
+ if (ports[port_id].port_status != RTE_PORT_STARTED)
+ return 0;
+
+ return 1;
+}
+
/*
 * Configure the Rx and Tx hairpin queues for the selected port.
 *
 * pi     - port being configured.
 * p_pi   - previously configured port (RTE_MAX_ETHPORTS if none yet);
 *          used as the peer in the port-pairing modes below.
 * cnt_pi - ordinal of this port in the configuration sequence; its parity
 *          selects the peer in the 0x2 mode.
 *
 * Returns 0 on success. On failure the port is put back to
 * RTE_PORT_STOPPED (when possible), need_reconfig_queues is set so the
 * queues are retried later, and -1 is returned.
 *
 * hairpin_mode bits (as used here; semantics per testpmd's --hairpin-mode
 * option — confirm against the testpmd documentation):
 *   0x0 (low nibble clear): each port loops to itself, implicit binding.
 *   0x1: chain ports — Tx peer is the next owned port, Rx peer the
 *        previous one; manual binding.
 *   0x2: pair ports two by two; manual binding.
 *   0x10: explicit Tx flow rule mode.
 */
static int
setup_hairpin_queues(portid_t pi, portid_t p_pi, uint16_t cnt_pi)
{
	queueid_t qi;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	int i;
	int diag;
	struct rte_port *port = &ports[pi];
	uint16_t peer_rx_port = pi;
	uint16_t peer_tx_port = pi;
	uint32_t manual = 1;
	uint32_t tx_exp = hairpin_mode & 0x10;

	if (!(hairpin_mode & 0xf)) {
		/* Self-hairpin: both peers are the port itself. */
		peer_rx_port = pi;
		peer_tx_port = pi;
		manual = 0;
	} else if (hairpin_mode & 0x1) {
		/* Chain mode: Tx peer is the next owned port, wrapping to
		 * the first owned port after the last one.
		 */
		peer_tx_port = rte_eth_find_next_owned_by(pi + 1,
				RTE_ETH_DEV_NO_OWNER);
		if (peer_tx_port >= RTE_MAX_ETHPORTS)
			peer_tx_port = rte_eth_find_next_owned_by(0,
					RTE_ETH_DEV_NO_OWNER);
		if (p_pi != RTE_MAX_ETHPORTS) {
			/* Rx peer is the previously configured port. */
			peer_rx_port = p_pi;
		} else {
			uint16_t next_pi;

			/* Last port will be the peer RX port of the first. */
			RTE_ETH_FOREACH_DEV(next_pi)
				peer_rx_port = next_pi;
		}
		manual = 1;
	} else if (hairpin_mode & 0x2) {
		/* Pair mode: odd-numbered ports peer with the previous
		 * port, even-numbered ports with the next one (falling
		 * back to self when no next port exists).
		 */
		if (cnt_pi & 0x1) {
			peer_rx_port = p_pi;
		} else {
			peer_rx_port = rte_eth_find_next_owned_by(pi + 1,
					RTE_ETH_DEV_NO_OWNER);
			if (peer_rx_port >= RTE_MAX_ETHPORTS)
				peer_rx_port = pi;
		}
		peer_tx_port = peer_rx_port;
		manual = 1;
	}

	/* Tx hairpin queues occupy ids [nb_txq, nb_txq + nb_hairpinq);
	 * each one is peered with the corresponding Rx hairpin queue
	 * (id i + nb_rxq) on peer_rx_port.
	 */
	for (qi = nb_txq, i = 0; qi < nb_hairpinq + nb_txq; qi++) {
		hairpin_conf.peers[0].port = peer_rx_port;
		hairpin_conf.peers[0].queue = i + nb_rxq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_tx_hairpin_queue_setup
			(pi, qi, nb_txd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup tx hairpin queue, roll state back and return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	/* Rx hairpin queues occupy ids [nb_rxq, nb_rxq + nb_hairpinq);
	 * each one is peered with the corresponding Tx hairpin queue
	 * (id i + nb_txq) on peer_tx_port.
	 */
	for (qi = nb_rxq, i = 0; qi < nb_hairpinq + nb_rxq; qi++) {
		hairpin_conf.peers[0].port = peer_tx_port;
		hairpin_conf.peers[0].queue = i + nb_txq;
		hairpin_conf.manual_bind = !!manual;
		hairpin_conf.tx_explicit = !!tx_exp;
		diag = rte_eth_rx_hairpin_queue_setup
			(pi, qi, nb_rxd, &hairpin_conf);
		i++;
		if (diag == 0)
			continue;

		/* Fail to setup rx hairpin queue, roll state back and return */
		if (port->port_status == RTE_PORT_HANDLING)
			port->port_status = RTE_PORT_STOPPED;
		else
			fprintf(stderr,
				"Port %d can not be set back to stopped\n", pi);
		fprintf(stderr, "Fail to configure port %d hairpin queues\n",
			pi);
		/* try to reconfigure queues next time */
		port->need_reconfig_queues = 1;
		return -1;
	}
	return 0;
}
+
+/* Configure the Rx with optional split. */
+int
+rx_queue_setup(uint16_t port_id, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ struct rte_eth_rxconf *rx_conf, struct rte_mempool *mp)
+{
+ union rte_eth_rxseg rx_useg[MAX_SEGS_BUFFER_SPLIT] = {};
+ unsigned int i, mp_n;
+ int ret;
+
+ if (rx_pkt_nb_segs <= 1 ||
+ (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_BUFFER_SPLIT) == 0) {
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id,
+ nb_rx_desc, socket_id,
+ rx_conf, mp);
+ return ret;
+ }
+ for (i = 0; i < rx_pkt_nb_segs; i++) {
+ struct rte_eth_rxseg_split *rx_seg = &rx_useg[i].split;
+ struct rte_mempool *mpx;
+ /*
+ * Use last valid pool for the segments with number
+ * exceeding the pool index.
+ */
+ mp_n = (i > mbuf_data_size_n) ? mbuf_data_size_n - 1 : i;
+ mpx = mbuf_pool_find(socket_id, mp_n);
+ /* Handle zero as mbuf data buffer size. */
+ rx_seg->length = rx_pkt_seg_lengths[i] ?
+ rx_pkt_seg_lengths[i] :
+ mbuf_data_size[mp_n];
+ rx_seg->offset = i < rx_pkt_nb_offs ?
+ rx_pkt_seg_offsets[i] : 0;
+ rx_seg->mp = mpx ? mpx : mp;
+ }
+ rx_conf->rx_nseg = rx_pkt_nb_segs;
+ rx_conf->rx_seg = rx_useg;
+ ret = rte_eth_rx_queue_setup(port_id, rx_queue_id, nb_rx_desc,
+ socket_id, rx_conf, NULL);
+ rx_conf->rx_seg = NULL;
+ rx_conf->rx_nseg = 0;
+ return ret;
+}
+
+static int
+alloc_xstats_display_info(portid_t pi)
+{
+ uint64_t **ids_supp = &ports[pi].xstats_info.ids_supp;
+ uint64_t **prev_values = &ports[pi].xstats_info.prev_values;
+ uint64_t **curr_values = &ports[pi].xstats_info.curr_values;
+
+ if (xstats_display_num == 0)
+ return 0;
+
+ *ids_supp = calloc(xstats_display_num, sizeof(**ids_supp));
+ if (*ids_supp == NULL)
+ goto fail_ids_supp;
+
+ *prev_values = calloc(xstats_display_num,
+ sizeof(**prev_values));
+ if (*prev_values == NULL)
+ goto fail_prev_values;
+
+ *curr_values = calloc(xstats_display_num,
+ sizeof(**curr_values));
+ if (*curr_values == NULL)
+ goto fail_curr_values;
+
+ ports[pi].xstats_info.allocated = true;
+
+ return 0;
+
+fail_curr_values:
+ free(*prev_values);
+fail_prev_values:
+ free(*ids_supp);
+fail_ids_supp:
+ return -ENOMEM;