+ /* if we were told not to allocate hugepages, override */
+ if (!huge)
+ cur_pgsz = sysconf(_SC_PAGESIZE);
+
+ ret = calc_mem_size(nb_mbufs, mbuf_sz, cur_pgsz, &mem_sz);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot calculate memory size\n");
+ return -1;
+ }
+
+ /* allocate our memory */
+ addr = alloc_mem(mem_sz, cur_pgsz, huge);
+
+ /* if we couldn't allocate memory with a specified page size,
+ * that doesn't mean we can't do it with other page sizes, so
+ * try another one.
+ */
+ if (addr == NULL)
+ continue;
+
+ /* store IOVA addresses for every page in this memory area */
+ n_pages = mem_sz / cur_pgsz;
+
+ iovas = malloc(sizeof(*iovas) * n_pages);
+
+ if (iovas == NULL) {
+ TESTPMD_LOG(ERR, "Cannot allocate memory for iova addresses\n");
+ goto fail;
+ }
+ /* lock memory if it's not huge pages */
+ if (!huge)
+ mlock(addr, mem_sz);
+
+ /* populate IOVA addresses */
+ for (cur_page = 0; cur_page < n_pages; cur_page++) {
+ rte_iova_t iova;
+ size_t offset;
+ void *cur;
+
+ offset = cur_pgsz * cur_page;
+ cur = RTE_PTR_ADD(addr, offset);
+
+ /* touch the page before getting its IOVA */
+ *(volatile char *)cur = 0;
+
+ iova = rte_mem_virt2iova(cur);
+
+ iovas[cur_page] = iova;
+ }
+
+ break;
+ }
+ /* if we couldn't allocate anything */
+ if (iovas == NULL)
+ return -1;
+
+ param->addr = addr;
+ param->len = mem_sz;
+ param->pgsz = cur_pgsz;
+ param->iova_table = iovas;
+ param->iova_table_len = n_pages;
+
+ return 0;
+fail:
+ if (iovas)
+ free(iovas);
+ if (addr)
+ munmap(addr, mem_sz);
+
+ return -1;
+}
+
+static int
+setup_extmem(uint32_t nb_mbufs, uint32_t mbuf_sz, bool huge)
+{
+ struct extmem_param param;
+ int socket_id, ret;
+
+ memset(¶m, 0, sizeof(param));
+
+ /* check if our heap exists */
+ socket_id = rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (socket_id < 0) {
+ /* create our heap */
+ ret = rte_malloc_heap_create(EXTMEM_HEAP_NAME);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create heap\n");
+ return -1;
+ }
+ }
+
+ ret = create_extmem(nb_mbufs, mbuf_sz, ¶m, huge);
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot create memory area\n");
+ return -1;
+ }
+
+ /* we now have a valid memory area, so add it to heap */
+ ret = rte_malloc_heap_memory_add(EXTMEM_HEAP_NAME,
+ param.addr, param.len, param.iova_table,
+ param.iova_table_len, param.pgsz);
+
+ /* when using VFIO, memory is automatically mapped for DMA by EAL */
+
+ /* not needed any more */
+ free(param.iova_table);
+
+ if (ret < 0) {
+ TESTPMD_LOG(ERR, "Cannot add memory to heap\n");
+ munmap(param.addr, param.len);
+ return -1;
+ }
+
+ /* success */
+
+ TESTPMD_LOG(DEBUG, "Allocated %zuMB of external memory\n",
+ param.len >> 20);
+
+ return 0;
+}
+static void
+dma_unmap_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ int ret;
+
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev_info dev_info;
+
+ ret = eth_dev_info_get_print_err(pid, &dev_info);
+ if (ret != 0) {
+ TESTPMD_LOG(DEBUG,
+ "unable to get device info for port %d on addr 0x%p,"
+ "mempool unmapping will not be performed\n",
+ pid, memhdr->addr);
+ continue;
+ }
+
+ ret = rte_dev_dma_unmap(dev_info.device, memhdr->addr, 0, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA unmap addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev_info.device->name);
+ }
+ }
+ ret = rte_extmem_unregister(memhdr->addr, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to un-register addr 0x%p\n", memhdr->addr);
+ }
+}
+
+static void
+dma_map_cb(struct rte_mempool *mp __rte_unused, void *opaque __rte_unused,
+ struct rte_mempool_memhdr *memhdr, unsigned mem_idx __rte_unused)
+{
+ uint16_t pid = 0;
+ size_t page_size = sysconf(_SC_PAGESIZE);
+ int ret;
+
+ ret = rte_extmem_register(memhdr->addr, memhdr->len, NULL, 0,
+ page_size);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to register addr 0x%p\n", memhdr->addr);
+ return;
+ }
+ RTE_ETH_FOREACH_DEV(pid) {
+ struct rte_eth_dev_info dev_info;
+
+ ret = eth_dev_info_get_print_err(pid, &dev_info);
+ if (ret != 0) {
+ TESTPMD_LOG(DEBUG,
+ "unable to get device info for port %d on addr 0x%p,"
+ "mempool mapping will not be performed\n",
+ pid, memhdr->addr);
+ continue;
+ }
+ ret = rte_dev_dma_map(dev_info.device, memhdr->addr, 0, memhdr->len);
+ if (ret) {
+ TESTPMD_LOG(DEBUG,
+ "unable to DMA map addr 0x%p "
+ "for device %s\n",
+ memhdr->addr, dev_info.device->name);
+ }
+ }
+}
+#endif
+
+static unsigned int
+setup_extbuf(uint32_t nb_mbufs, uint16_t mbuf_sz, unsigned int socket_id,
+ char *pool_name, struct rte_pktmbuf_extmem **ext_mem)
+{
+ struct rte_pktmbuf_extmem *xmem;
+ unsigned int ext_num, zone_num, elt_num;
+ uint16_t elt_size;
+
+ elt_size = RTE_ALIGN_CEIL(mbuf_sz, RTE_CACHE_LINE_SIZE);
+ elt_num = EXTBUF_ZONE_SIZE / elt_size;
+ zone_num = (nb_mbufs + elt_num - 1) / elt_num;
+
+ xmem = malloc(sizeof(struct rte_pktmbuf_extmem) * zone_num);
+ if (xmem == NULL) {
+ TESTPMD_LOG(ERR, "Cannot allocate memory for "
+ "external buffer descriptors\n");
+ *ext_mem = NULL;
+ return 0;
+ }
+ for (ext_num = 0; ext_num < zone_num; ext_num++) {
+ struct rte_pktmbuf_extmem *xseg = xmem + ext_num;
+ const struct rte_memzone *mz;
+ char mz_name[RTE_MEMZONE_NAMESIZE];
+ int ret;
+
+ ret = snprintf(mz_name, sizeof(mz_name),
+ RTE_MEMPOOL_MZ_FORMAT "_xb_%u", pool_name, ext_num);
+ if (ret < 0 || ret >= (int)sizeof(mz_name)) {
+ errno = ENAMETOOLONG;
+ ext_num = 0;
+ break;
+ }
+ mz = rte_memzone_reserve_aligned(mz_name, EXTBUF_ZONE_SIZE,
+ socket_id,
+ RTE_MEMZONE_IOVA_CONTIG |
+ RTE_MEMZONE_1GB |
+ RTE_MEMZONE_SIZE_HINT_ONLY,
+ EXTBUF_ZONE_SIZE);
+ if (mz == NULL) {
+ /*
+ * The caller exits on external buffer creation
+ * error, so there is no need to free memzones.
+ */
+ errno = ENOMEM;
+ ext_num = 0;
+ break;
+ }
+ xseg->buf_ptr = mz->addr;
+ xseg->buf_iova = mz->iova;
+ xseg->buf_len = EXTBUF_ZONE_SIZE;
+ xseg->elt_size = elt_size;
+ }
+ if (ext_num == 0 && xmem != NULL) {
+ free(xmem);
+ xmem = NULL;
+ }
+ *ext_mem = xmem;
+ return ext_num;
+}
+
+/*
+ * Configuration initialisation done once at init time.
+ */
+static struct rte_mempool *
+mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
+ unsigned int socket_id, uint16_t size_idx)
+{
+ char pool_name[RTE_MEMPOOL_NAMESIZE];
+ struct rte_mempool *rte_mp = NULL;
+#ifndef RTE_EXEC_ENV_WINDOWS
+ uint32_t mb_size;
+
+ mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
+#endif
+ mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name), size_idx);
+ if (!is_proc_primary()) {
+ rte_mp = rte_mempool_lookup(pool_name);
+ if (rte_mp == NULL)
+ rte_exit(EXIT_FAILURE,
+ "Get mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ return rte_mp;
+ }
+
+ TESTPMD_LOG(INFO,
+ "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
+ pool_name, nb_mbuf, mbuf_seg_size, socket_id);
+
+ switch (mp_alloc_type) {
+ case MP_ALLOC_NATIVE:
+ {
+ /* wrapper to rte_mempool_create() */
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size, socket_id);
+ break;
+ }
+#ifndef RTE_EXEC_ENV_WINDOWS
+ case MP_ALLOC_ANON:
+ {
+ rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
+ mb_size, (unsigned int) mb_mempool_cache,
+ sizeof(struct rte_pktmbuf_pool_private),
+ socket_id, mempool_flags);
+ if (rte_mp == NULL)
+ goto err;
+
+ if (rte_mempool_populate_anon(rte_mp) == 0) {
+ rte_mempool_free(rte_mp);
+ rte_mp = NULL;
+ goto err;
+ }
+ rte_pktmbuf_pool_init(rte_mp, NULL);
+ rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
+ rte_mempool_mem_iter(rte_mp, dma_map_cb, NULL);
+ break;
+ }
+ case MP_ALLOC_XMEM:
+ case MP_ALLOC_XMEM_HUGE:
+ {
+ int heap_socket;
+ bool huge = mp_alloc_type == MP_ALLOC_XMEM_HUGE;
+
+ if (setup_extmem(nb_mbuf, mbuf_seg_size, huge) < 0)
+ rte_exit(EXIT_FAILURE, "Could not create external memory\n");
+
+ heap_socket =
+ rte_malloc_heap_get_socket(EXTMEM_HEAP_NAME);
+ if (heap_socket < 0)
+ rte_exit(EXIT_FAILURE, "Could not get external memory socket ID\n");
+
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
+ mb_mempool_cache, 0, mbuf_seg_size,
+ heap_socket);
+ break;
+ }
+#endif
+ case MP_ALLOC_XBUF:
+ {
+ struct rte_pktmbuf_extmem *ext_mem;
+ unsigned int ext_num;
+
+ ext_num = setup_extbuf(nb_mbuf, mbuf_seg_size,
+ socket_id, pool_name, &ext_mem);
+ if (ext_num == 0)
+ rte_exit(EXIT_FAILURE,
+ "Can't create pinned data buffers\n");
+
+ TESTPMD_LOG(INFO, "preferred mempool ops selected: %s\n",
+ rte_mbuf_best_mempool_ops());
+ rte_mp = rte_pktmbuf_pool_create_extbuf
+ (pool_name, nb_mbuf, mb_mempool_cache,
+ 0, mbuf_seg_size, socket_id,
+ ext_mem, ext_num);
+ free(ext_mem);
+ break;
+ }
+ default:
+ {
+ rte_exit(EXIT_FAILURE, "Invalid mempool creation mode\n");
+ }
+ }
+
+#ifndef RTE_EXEC_ENV_WINDOWS
+err:
+#endif
+ if (rte_mp == NULL) {
+ rte_exit(EXIT_FAILURE,
+ "Creation of mbuf pool for socket %u failed: %s\n",
+ socket_id, rte_strerror(rte_errno));
+ } else if (verbose_level > 0) {
+ rte_mempool_dump(stdout, rte_mp);
+ }
+ return rte_mp;
+}
+
+/*
+ * Check given socket id is valid or not with NUMA mode,
+ * if valid, return 0, else return -1
+ */
+static int
+check_socket_id(const unsigned int socket_id)
+{
+ static int warning_once = 0;
+
+ if (new_socket_id(socket_id)) {
+ if (!warning_once && numa_support)
+ fprintf(stderr,
+ "Warning: NUMA should be configured manually by using --port-numa-config and --ring-numa-config parameters along with --numa.\n");
+ warning_once = 1;
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Get the allowed maximum number of RX queues.
+ * *pid return the port id which has minimal value of
+ * max_rx_queues in all ports.
+ */
+queueid_t
+get_allowed_max_nb_rxq(portid_t *pid)
+{
+ queueid_t allowed_max_rxq = RTE_MAX_QUEUES_PER_PORT;
+ bool max_rxq_valid = false;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_rxq_valid = true;
+ if (dev_info.max_rx_queues < allowed_max_rxq) {
+ allowed_max_rxq = dev_info.max_rx_queues;
+ *pid = pi;
+ }
+ }
+ return max_rxq_valid ? allowed_max_rxq : 0;
+}
+
+/*
+ * Check input rxq is valid or not.
+ * If input rxq is not greater than any of maximum number
+ * of RX queues of all ports, it is valid.
+ * if valid, return 0, else return -1
+ */
+int
+check_nb_rxq(queueid_t rxq)
+{
+ queueid_t allowed_max_rxq;
+ portid_t pid = 0;
+
+ allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
+ if (rxq > allowed_max_rxq) {
+ fprintf(stderr,
+ "Fail: input rxq (%u) can't be greater than max_rx_queues (%u) of port %u\n",
+ rxq, allowed_max_rxq, pid);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Get the allowed maximum number of TX queues.
+ * *pid return the port id which has minimal value of
+ * max_tx_queues in all ports.
+ */
+queueid_t
+get_allowed_max_nb_txq(portid_t *pid)
+{
+ queueid_t allowed_max_txq = RTE_MAX_QUEUES_PER_PORT;
+ bool max_txq_valid = false;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ max_txq_valid = true;
+ if (dev_info.max_tx_queues < allowed_max_txq) {
+ allowed_max_txq = dev_info.max_tx_queues;
+ *pid = pi;
+ }
+ }
+ return max_txq_valid ? allowed_max_txq : 0;
+}
+
+/*
+ * Check input txq is valid or not.
+ * If input txq is not greater than any of maximum number
+ * of TX queues of all ports, it is valid.
+ * if valid, return 0, else return -1
+ */
+int
+check_nb_txq(queueid_t txq)
+{
+ queueid_t allowed_max_txq;
+ portid_t pid = 0;
+
+ allowed_max_txq = get_allowed_max_nb_txq(&pid);
+ if (txq > allowed_max_txq) {
+ fprintf(stderr,
+ "Fail: input txq (%u) can't be greater than max_tx_queues (%u) of port %u\n",
+ txq, allowed_max_txq, pid);
+ return -1;
+ }
+ return 0;
+}
+
+/*
+ * Get the allowed maximum number of RXDs of every rx queue.
+ * *pid return the port id which has minimal value of
+ * max_rxd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_max_nb_rxd(portid_t *pid)
+{
+ uint16_t allowed_max_rxd = UINT16_MAX;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.rx_desc_lim.nb_max < allowed_max_rxd) {
+ allowed_max_rxd = dev_info.rx_desc_lim.nb_max;
+ *pid = pi;
+ }
+ }
+ return allowed_max_rxd;
+}
+
+/*
+ * Get the allowed minimal number of RXDs of every rx queue.
+ * *pid return the port id which has minimal value of
+ * min_rxd in all queues of all ports.
+ */
+static uint16_t
+get_allowed_min_nb_rxd(portid_t *pid)
+{
+ uint16_t allowed_min_rxd = 0;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.rx_desc_lim.nb_min > allowed_min_rxd) {
+ allowed_min_rxd = dev_info.rx_desc_lim.nb_min;
+ *pid = pi;
+ }
+ }
+
+ return allowed_min_rxd;
+}
+
+/*
+ * Check input rxd is valid or not.
+ * If input rxd is not greater than any of maximum number
+ * of RXDs of every Rx queues and is not less than any of
+ * minimal number of RXDs of every Rx queues, it is valid.
+ * if valid, return 0, else return -1
+ */
+int
+check_nb_rxd(queueid_t rxd)
+{
+ uint16_t allowed_max_rxd;
+ uint16_t allowed_min_rxd;
+ portid_t pid = 0;
+
+ allowed_max_rxd = get_allowed_max_nb_rxd(&pid);
+ if (rxd > allowed_max_rxd) {
+ fprintf(stderr,
+ "Fail: input rxd (%u) can't be greater than max_rxds (%u) of port %u\n",
+ rxd, allowed_max_rxd, pid);
+ return -1;
+ }
+
+ allowed_min_rxd = get_allowed_min_nb_rxd(&pid);
+ if (rxd < allowed_min_rxd) {
+ fprintf(stderr,
+ "Fail: input rxd (%u) can't be less than min_rxds (%u) of port %u\n",
+ rxd, allowed_min_rxd, pid);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Get the allowed maximum number of TXDs of every rx queues.
+ * *pid return the port id which has minimal value of
+ * max_txd in every tx queue.
+ */
+static uint16_t
+get_allowed_max_nb_txd(portid_t *pid)
+{
+ uint16_t allowed_max_txd = UINT16_MAX;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.tx_desc_lim.nb_max < allowed_max_txd) {
+ allowed_max_txd = dev_info.tx_desc_lim.nb_max;
+ *pid = pi;
+ }
+ }
+ return allowed_max_txd;
+}
+
+/*
+ * Get the allowed maximum number of TXDs of every tx queues.
+ * *pid return the port id which has minimal value of
+ * min_txd in every tx queue.
+ */
+static uint16_t
+get_allowed_min_nb_txd(portid_t *pid)
+{
+ uint16_t allowed_min_txd = 0;
+ portid_t pi;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (eth_dev_info_get_print_err(pi, &dev_info) != 0)
+ continue;
+
+ if (dev_info.tx_desc_lim.nb_min > allowed_min_txd) {
+ allowed_min_txd = dev_info.tx_desc_lim.nb_min;
+ *pid = pi;
+ }
+ }
+
+ return allowed_min_txd;
+}
+
+/*
+ * Check input txd is valid or not.
+ * If input txd is not greater than any of maximum number
+ * of TXDs of every Rx queues, it is valid.
+ * if valid, return 0, else return -1
+ */
+int
+check_nb_txd(queueid_t txd)
+{
+ uint16_t allowed_max_txd;
+ uint16_t allowed_min_txd;
+ portid_t pid = 0;
+
+ allowed_max_txd = get_allowed_max_nb_txd(&pid);
+ if (txd > allowed_max_txd) {
+ fprintf(stderr,
+ "Fail: input txd (%u) can't be greater than max_txds (%u) of port %u\n",
+ txd, allowed_max_txd, pid);
+ return -1;
+ }
+
+ allowed_min_txd = get_allowed_min_nb_txd(&pid);
+ if (txd < allowed_min_txd) {
+ fprintf(stderr,
+ "Fail: input txd (%u) can't be less than min_txds (%u) of port %u\n",
+ txd, allowed_min_txd, pid);
+ return -1;
+ }
+ return 0;
+}
+
+
+/*
+ * Get the allowed maximum number of hairpin queues.
+ * *pid return the port id which has minimal value of
+ * max_hairpin_queues in all ports.
+ */
+queueid_t
+get_allowed_max_nb_hairpinq(portid_t *pid)
+{
+ queueid_t allowed_max_hairpinq = RTE_MAX_QUEUES_PER_PORT;
+ portid_t pi;
+ struct rte_eth_hairpin_cap cap;
+
+ RTE_ETH_FOREACH_DEV(pi) {
+ if (rte_eth_dev_hairpin_capability_get(pi, &cap) != 0) {
+ *pid = pi;
+ return 0;
+ }
+ if (cap.max_nb_queues < allowed_max_hairpinq) {
+ allowed_max_hairpinq = cap.max_nb_queues;
+ *pid = pi;
+ }
+ }
+ return allowed_max_hairpinq;
+}
+
+/*
+ * Check input hairpin is valid or not.
+ * If input hairpin is not greater than any of maximum number
+ * of hairpin queues of all ports, it is valid.
+ * if valid, return 0, else return -1
+ */
+int
+check_nb_hairpinq(queueid_t hairpinq)
+{
+ queueid_t allowed_max_hairpinq;
+ portid_t pid = 0;
+
+ allowed_max_hairpinq = get_allowed_max_nb_hairpinq(&pid);
+ if (hairpinq > allowed_max_hairpinq) {
+ fprintf(stderr,
+ "Fail: input hairpin (%u) can't be greater than max_hairpin_queues (%u) of port %u\n",
+ hairpinq, allowed_max_hairpinq, pid);
+ return -1;
+ }
+ return 0;
+}
+
+static int
+get_eth_overhead(struct rte_eth_dev_info *dev_info)
+{
+ uint32_t eth_overhead;
+
+ if (dev_info->max_mtu != UINT16_MAX &&
+ dev_info->max_rx_pktlen > dev_info->max_mtu)
+ eth_overhead = dev_info->max_rx_pktlen - dev_info->max_mtu;
+ else
+ eth_overhead = RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+ return eth_overhead;
+}
+
/*
 * Per-port one-time configuration: negotiate Rx metadata delivery and the
 * flow transfer proxy, copy the global Rx/Tx mode templates into the
 * port, strip unsupported Tx offloads, propagate offloads to every
 * queue, and flag the port for (re)configuration.  May enlarge the
 * global mbuf_data_size[0] to satisfy the device's segments-per-MTU
 * limit.  Exits the application if device info cannot be retrieved.
 */
static void
init_config_port_offloads(portid_t pid, uint32_t socket_id)
{
	struct rte_port *port = &ports[pid];
	int ret;
	int i;

	eth_rx_metadata_negotiate_mp(pid);
	flow_pick_transfer_proxy_mp(pid);

	/* start from the global mode templates */
	port->dev_conf.txmode = tx_mode;
	port->dev_conf.rxmode = rx_mode;

	ret = eth_dev_info_get_print_err(pid, &port->dev_info);
	if (ret != 0)
		rte_exit(EXIT_FAILURE, "rte_eth_dev_info_get() failed\n");

	/* drop the fast-free Tx offload if the device cannot do it */
	if (!(port->dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE))
		port->dev_conf.txmode.offloads &=
			~DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* Apply Rx offloads configuration */
	for (i = 0; i < port->dev_info.max_rx_queues; i++)
		port->rx_conf[i].offloads = port->dev_conf.rxmode.offloads;
	/* Apply Tx offloads configuration */
	for (i = 0; i < port->dev_info.max_tx_queues; i++)
		port->tx_conf[i].offloads = port->dev_conf.txmode.offloads;

	if (eth_link_speed)
		port->dev_conf.link_speeds = eth_link_speed;

	/* translate the requested max frame length into an MTU */
	if (max_rx_pkt_len)
		port->dev_conf.rxmode.mtu = max_rx_pkt_len -
			get_eth_overhead(&port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
	port->tx_metadata = 0;

	/*
	 * Check for maximum number of segments per MTU.
	 * Accordingly update the mbuf data size.
	 */
	if (port->dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
	    port->dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
		uint32_t eth_overhead = get_eth_overhead(&port->dev_info);
		uint16_t mtu;

		if (rte_eth_dev_get_mtu(pid, &mtu) == 0) {
			/* smallest per-segment size that still fits an MTU frame */
			uint16_t data_size = (mtu + eth_overhead) /
				port->dev_info.rx_desc_lim.nb_mtu_seg_max;
			uint16_t buffer_size = data_size + RTE_PKTMBUF_HEADROOM;

			/* grow the first-segment mbuf size globally if needed */
			if (buffer_size > mbuf_data_size[0]) {
				mbuf_data_size[0] = buffer_size;
				TESTPMD_LOG(WARNING,
					"Configured mbuf size of the first segment %hu\n",
					mbuf_data_size[0]);
			}
		}
	}
}
+
+static void