static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
static struct rte_eth_conf port_conf = {
.rxmode = {
- .max_rx_pkt_len = ETHER_MAX_LEN,
+ .mq_mode = ETH_MQ_RX_RSS,
+ .max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
return;
}
- /* When we receive a RTMIN signal, stop kni processing */
- if (signum == SIGRTMIN) {
+ /* When we receive an RTMIN or SIGINT signal, stop kni processing */
+ if (signum == SIGRTMIN || signum == SIGINT){
printf("SIGRTMIN is received, and the KNI processing is "
"going to stop\n");
rte_atomic32_inc(&kni_stop);
signal(SIGUSR1, signal_handler);
signal(SIGUSR2, signal_handler);
signal(SIGRTMIN, signal_handler);
+ signal(SIGINT, signal_handler);
/* Initialise EAL */
ret = rte_eal_init(argc, argv);
},
.tx_free_thresh = 0, /* Use PMD default values */
.tx_rs_thresh = 0, /* Use PMD default values */
+ /*
+ * As the example won't handle multi-segment and offload cases,
+ * set the flag by default.
+ */
+ .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS,
};
struct rte_mempool * l2fwd_pktmbuf_pool = NULL;
static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
/* Create the ipv4 exact match flow */
+ memset(&entry, 0, sizeof(entry));
switch (i & (NUMBER_PORT_USED -1)) {
case 0:
entry = ipv4_l3fwd_route_array[0];
uint8_t b = (uint8_t) (((i/NUMBER_PORT_USED)/BYTE_VALUE_MAX)%BYTE_VALUE_MAX);
uint8_t c = (uint8_t) ((i/NUMBER_PORT_USED)/(BYTE_VALUE_MAX*BYTE_VALUE_MAX));
/* Create the ipv6 exact match flow */
+ memset(&entry, 0, sizeof(entry));
switch (i & (NUMBER_PORT_USED - 1)) {
case 0: entry = ipv6_l3fwd_route_array[0]; break;
case 1: entry = ipv6_l3fwd_route_array[1]; break;
static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
{
struct rte_eth_conf port_conf = {
.rxmode = {
- .mq_mode = ETH_MQ_RX_RSS,
+ .mq_mode = ETH_MQ_RX_RSS,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
.hw_ip_checksum = 1, /**< IP checksum offload enabled */
}
for (q = 0; q < tx_rings; q ++) {
- retval = rte_eth_tx_queue_setup(port, q, RX_RING_SIZE,
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
rte_eth_dev_socket_id(port), &tx_conf_default);
if (retval < 0)
return retval;
***/
static struct rte_eth_conf port_conf = {
.rxmode = {
+ .mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0,
{.cir = 1000000 * 46, .pir = 1500000 * 46, .cbs = 2048, .pbs = 2048},
};
-#define DIM(a) (sizeof (a) / sizeof ((a)[0]))
#define APP_FLOWS_MAX 256
FLOW_METER app_flows[APP_FLOWS_MAX];
{
uint32_t i, j;
- for (i = 0, j = 0; i < APP_FLOWS_MAX; i ++, j = (j + 1) % DIM(PARAMS)){
+ for (i = 0, j = 0; i < APP_FLOWS_MAX; i ++, j = (j + 1) % RTE_DIM(PARAMS)){
FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
}
}
ether_addr_copy(&mac_addr, &hdr->s_addr);
void *tmp = &hdr->d_addr.addr_bytes[0];
- *((uint64_t *)tmp) = 0x010000C28001;
+ *((uint64_t *)tmp) = 0x010000C28001ULL;
hdr->ether_type = rte_cpu_to_be_16(ETHER_TYPE_FLOW_CONTROL);
.hthresh = RX_HTHRESH,
.wthresh = RX_WTHRESH,
},
+ .rx_drop_en = 1,
};
/*
* Main thread that does the work, reading from INPUT_PORT
* and writing to OUTPUT_PORT
*/
-static __attribute__((noreturn)) int
+static int
lcore_main(__attribute__((__unused__)) void* dummy)
{
const uint16_t lcore_id = (uint16_t)rte_lcore_id();
printf("core %u(lcore %u) reading queues %i-%i\n", (unsigned)core_id,
(unsigned)lcore_id, startQueue, endQueue - 1);
+ if (startQueue == endQueue) {
+ printf("lcore %u has nothing to do\n", lcore_id);
+ return (0);
+ }
+
for (;;) {
struct rte_mbuf *buf[MAX_PKT_BURST];
const uint16_t buf_size = sizeof(buf) / sizeof(buf[0]);