#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
-#include <rte_ring.h>
#include <rte_log.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
/* Max size of a single packet */
#define MAX_PACKET_SZ 2048
-/* Number of bytes needed for each mbuf */
-#define MBUF_SZ \
- (MAX_PACKET_SZ + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+/* Size of the data buffer in each mbuf */
+#define MBUF_DATA_SZ (MAX_PACKET_SZ + RTE_PKTMBUF_HEADROOM)
/* Number of mbufs in mempool that is created */
#define NB_MBUF (8192 * 16)
static struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
-/* RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-/* RX ring configuration */
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8, /* Ring prefetch threshold */
- .hthresh = 8, /* Ring host threshold */
- .wthresh = 4, /* Ring writeback threshold */
- },
- .rx_free_thresh = 0, /* Immediately free RX descriptors */
-};
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-/* TX ring configuration */
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36, /* Ring prefetch threshold */
- .hthresh = 0, /* Ring host threshold */
- .wthresh = 0, /* Ring writeback threshold */
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
-};
/* Options for configuring ethernet port */
static struct rte_eth_conf port_conf = {
.hw_ip_checksum = 0, /* IP checksum offload disabled */
.hw_vlan_filter = 0, /* VLAN filtering disabled */
.jumbo_frame = 0, /* Jumbo Frame Support disabled */
- .hw_strip_crc = 0, /* CRC stripped by hardware */
+ .hw_strip_crc = 1, /* CRC stripped by hardware */
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
};
enum lcore_rxtx flag = LCORE_NONE;
- nb_ports = (uint8_t)(nb_ports < RTE_MAX_ETHPORTS ?
- nb_ports : RTE_MAX_ETHPORTS);
for (i = 0; i < nb_ports; i++) {
if (!kni_port_params_array[i])
continue;
goto fail;
}
kni_port_params_array[port_id] =
- (struct kni_port_params*)rte_zmalloc("KNI_port_params",
- sizeof(struct kni_port_params), CACHE_LINE_SIZE);
+ rte_zmalloc("KNI_port_params",
+ sizeof(struct kni_port_params), RTE_CACHE_LINE_SIZE);
kni_port_params_array[port_id]->port_id = port_id;
kni_port_params_array[port_id]->lcore_rx =
(uint8_t)int_fld[i++];
return ret;
}
+/* Initialize KNI subsystem */
+static void
+init_kni(void)
+{
+	unsigned int num_of_kni_ports = 0, i;
+	struct kni_port_params **params = kni_port_params_array;
+
+	/* Calculate the maximum number of KNI interfaces that will be used.
+	 * Each configured port contributes one KNI device per kernel lcore
+	 * (nb_lcore_k), or a single KNI device when nb_lcore_k is zero.
+	 */
+	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
+		if (kni_port_params_array[i]) {
+			num_of_kni_ports += (params[i]->nb_lcore_k ?
+				params[i]->nb_lcore_k : 1);
+		}
+	}
+
+	/* Invoke rte KNI init to preallocate the ports */
+	rte_kni_init(num_of_kni_ports);
+}
+
/* Initialise a single port on an Ethernet device */
static void
init_port(uint8_t port)
(unsigned)port, ret);
ret = rte_eth_rx_queue_setup(port, 0, NB_RXD,
- rte_eth_dev_socket_id(port), &rx_conf, pktmbuf_pool);
+ rte_eth_dev_socket_id(port), NULL, pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)\n", (unsigned)port, ret);
ret = rte_eth_tx_queue_setup(port, 0, NB_TXD,
- rte_eth_dev_socket_id(port), &tx_conf);
+ rte_eth_dev_socket_id(port), NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
"port%u (%d)\n", (unsigned)port, ret);
continue;
}
/* clear all_ports_up flag if any link down */
- if (link.link_status == 0) {
+ if (link.link_status == ETH_LINK_DOWN) {
all_ports_up = 0;
break;
}
if (port_id >= RTE_MAX_ETHPORTS || !p[port_id])
return -1;
- for (i = 0; i < p[i]->nb_kni; i++) {
- rte_kni_release(p[i]->kni[i]);
- p[i]->kni[i] = NULL;
+ for (i = 0; i < p[port_id]->nb_kni; i++) {
+ if (rte_kni_release(p[port_id]->kni[i]))
+ printf("Fail to release kni\n");
+ p[port_id]->kni[i] = NULL;
}
rte_eth_dev_stop(port_id);
rte_exit(EXIT_FAILURE, "Could not parse input parameters\n");
/* Create the mbuf pool */
- pktmbuf_pool = rte_mempool_create("mbuf_pool", NB_MBUF, MBUF_SZ,
- MEMPOOL_CACHE_SZ,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ pktmbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", NB_MBUF,
+ MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, rte_socket_id());
if (pktmbuf_pool == NULL) {
rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool\n");
return -1;
/* Get number of ports found in scan */
nb_sys_ports = rte_eth_dev_count();
if (nb_sys_ports == 0)
- rte_exit(EXIT_FAILURE, "No supported Ethernet devices found - "
- "check that CONFIG_RTE_LIBRTE_IGB_PMD=y and/or "
- "CONFIG_RTE_LIBRTE_IXGBE_PMD=y in the config file\n");
+ rte_exit(EXIT_FAILURE, "No supported Ethernet device found\n");
/* Check if the configured port ID is valid */
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
rte_exit(EXIT_FAILURE, "Configured invalid "
"port ID %u\n", i);
+ /* Initialize KNI subsystem */
+ init_kni();
+
/* Initialise each port */
for (port = 0; port < nb_sys_ports; port++) {
/* Skip ports that are not enabled */
return 0;
}
-