/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_APP RTE_LOGTYPE_USER1
-/* NUMA socket to allocate mbuf pool on */
-#define SOCKET 0
-
/* Max size of a single packet */
#define MAX_PACKET_SZ 2048
/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE 4
+#define KNI_US_PER_SECOND 1000000
+#define KNI_SECOND_PER_DAY 86400
+
/*
* RX and TX Prefetch, Host, and Write-back threshold values should be
* carefully set for optimal performance. Consult the network
.hw_strip_crc = 0, /* CRC stripped by hardware */
},
.txmode = {
- .mq_mode = ETH_DCB_NONE,
+ .mq_mode = ETH_MQ_TX_NONE,
},
};
.config_network_if = kni_config_network_interface,
};
+static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
+
/* Print out statistics on packets handled */
static void
print_stats(void)
printf("====== ============== ============ ============ ============ ============\n");
}
-/* Custom handling of signals to handle stats */
+/* Custom handling of signals to handle stats and kni processing */
static void
signal_handler(int signum)
{
printf("\n**Statistics have been reset**\n");
return;
}
+
+	/* When we receive an RTMIN signal, stop KNI processing */
+	if (signum == SIGRTMIN) {
+ printf("SIGRTMIN is received, and the KNI processing is "
+ "going to stop\n");
+ rte_atomic32_inc(&kni_stop);
+ return;
+ }
}
static void
num = rte_kni_tx_burst(kni, pkts_burst, nb_rx);
kni_stats[port_id].rx_packets += num;
+ rte_kni_handle_request(kni);
if (unlikely(num < nb_rx)) {
/* Free mbufs not tx to kni interface */
kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
}
/* Main processing loop */
-static __attribute__((noreturn)) int
+static int
main_loop(__rte_unused void *arg)
{
uint8_t pid;
const unsigned lcore_id = rte_lcore_id();
struct rte_kni *kni = kni_lcore_to_kni(lcore_id);
- if (kni == NULL) {
- RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
- for (;;)
- ; /* loop doing nothing */
- } else {
+ if (kni != NULL) {
pid = rte_kni_get_port_id(kni);
if (pid >= RTE_MAX_ETHPORTS)
rte_exit(EXIT_FAILURE, "Failure: port id >= %d\n",
fflush(stdout);
/* rx loop */
- while (1)
+ while (1) {
+ int32_t flag = rte_atomic32_read(&kni_stop);
+
+ if (flag)
+ break;
kni_ingress(kni);
+ }
} else if (kni_port_info[pid].lcore_id_egress == lcore_id) {
/* Running on lcores for output packets */
RTE_LOG(INFO, APP, "Lcore %u is writing to port %d\n",
fflush(stdout);
/* tx loop */
- while (1)
+ while (1) {
+ int32_t flag = rte_atomic32_read(&kni_stop);
+
+ if (flag)
+ break;
kni_egress(kni);
- } else {
- RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n",
- lcore_id);
- for (;;)
- ; /* loop doing nothing */
+ }
}
}
+
+	/* Fall through to here when this lcore has no KNI work assigned */
+ RTE_LOG(INFO, APP, "Lcore %u has nothing to do\n", lcore_id);
+
+ return 0;
}
/* Display usage instructions */
{
RTE_LOG(INFO, APP, "\nUsage: %s [EAL options] -- -p PORTMASK "
"-i IN_CORES -o OUT_CORES\n"
- " -p PORTMASK: hex bitmask of ports to use\n"
- " -i IN_CORES: hex bitmask of cores which read "
+ " -p PORTMASK: hex bitmask of ports to use\n"
+ " -i IN_CORES: hex bitmask of cores which read "
"from NIC\n"
- " -o OUT_CORES: hex bitmask of cores which write to NIC\n",
+ " -o OUT_CORES: hex bitmask of cores which write "
+ "to NIC\n",
prgname);
}
}
if (in_lcore != 0) {
- /* It is be for packet receiving */
+ /* It is for packet receiving */
while ((rx_port < nb_port) &&
((ports_mask & (1 << rx_port)) == 0))
rx_port++;
rte_exit(EXIT_FAILURE, "Could not configure port%u (%d)",
(unsigned)port, ret);
- ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, SOCKET, &rx_conf,
- pktmbuf_pool);
+ ret = rte_eth_rx_queue_setup(port, 0, NB_RXD, rte_eth_dev_socket_id(port),
+ &rx_conf, pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up RX queue for "
"port%u (%d)", (unsigned)port, ret);
- ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, SOCKET, &tx_conf);
+ ret = rte_eth_tx_queue_setup(port, 0, NB_TXD, rte_eth_dev_socket_id(port),
+ &tx_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Could not setup up TX queue for "
"port%u (%d)", (unsigned)port, ret);
/* Associate signal_hanlder function with USR signals */
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
+ signal(SIGRTMIN, signal_handler);
/* Initialise EAL */
ret = rte_eal_init(argc, argv);
MEMPOOL_CACHE_SZ,
sizeof(struct rte_pktmbuf_pool_private),
rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- SOCKET, 0);
+ rte_socket_id(), 0);
if (pktmbuf_pool == NULL) {
rte_exit(EXIT_FAILURE, "Could not initialise mbuf pool");
return -1;
}
/* Initialise PMD driver(s) */
-#ifdef RTE_LIBRTE_IGB_PMD
- ret = rte_igb_pmd_init();
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Could not initialise igb PMD (%d)",
- ret);
-#endif
-#ifdef RTE_LIBRTE_IXGBE_PMD
- ret = rte_ixgbe_pmd_init();
+ ret = rte_pmd_init_all();
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Could not initialise ixgbe PMD (%d)",
- ret);
-#endif
+ rte_exit(EXIT_FAILURE, "Could not initialise PMD (%d)", ret);
/* Scan PCI bus for recognised devices */
ret = rte_eal_pci_probe();
return -1;
}
+ for (port = 0; port < nb_sys_ports; port++) {
+ struct rte_kni *kni = kni_port_info[port].kni;
+
+ if (kni != NULL)
+ rte_kni_release(kni);
+ }
+
return 0;
}