/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 * * Neither the name of Intel Corporation nor the names of its
 * contributors may be used to endorse or promote products derived
 * from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#endif
/*
 * Buffer pool configuration
 */
/*
 * Per-mbuf data buffer size: one 2 KB data segment plus headroom, in the
 * form expected by rte_pktmbuf_pool_create() (data room size only; the
 * struct rte_mbuf and private area are accounted for by the API itself).
 */
#define MBUF_DATA_SIZE (2048 + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192              /* number of mbufs in the pool */
#define MEMPOOL_CACHE_SIZE 256    /* per-lcore mempool cache size */
.rx_adv_conf = {
.rss_conf = {
.rss_key = NULL,
- .rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6,
+ .rss_hf = ETH_RSS_IP,
},
},
.txmode = {
},
};
/*
 * NIC RX/TX ring sizes (descriptors per queue). Explicit
 * rte_eth_rxconf/rte_eth_txconf tables are no longer needed: the queue
 * setup calls pass NULL so the PMD's recommended defaults are used.
 */
#define NIC_RX_QUEUE_DESC 128
#define NIC_TX_QUEUE_DESC 512
/* Per-flow metering state, one entry per flow id (indexed 0..APP_FLOWS_MAX-1);
 * initialized by app_configure_flow_table(). */
FLOW_METER app_flows[APP_FLOWS_MAX];
-static void
+static void
app_configure_flow_table(void)
{
uint32_t i, j;
while (1) {
uint64_t time_diff;
int i, nb_rx;
-
+
/* Mechanism to avoid stale packets in the output buffer */
current_time = rte_rdtsc();
time_diff = current_time - last_time;
if (unlikely(time_diff > TIME_TX_DRAIN)) {
int ret;
-
+
if (pkts_tx_len == 0) {
last_time = current_time;
-
+
continue;
}
/* Write packet burst to NIC TX */
ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, pkts_tx_len);
-
+
/* Free buffers for any packets not written successfully */
if (unlikely(ret < pkts_tx_len)) {
for ( ; ret < pkts_tx_len; ret ++) {
/* Empty the output buffer */
pkts_tx_len = 0;
-
+
last_time = current_time;
}
-
+
/* Read packet burst from NIC RX */
nb_rx = rte_eth_rx_burst(port_rx, NIC_RX_QUEUE, pkts_rx, PKT_RX_BURST_MAX);
-
+
/* Handle packets */
for (i = 0; i < nb_rx; i ++) {
struct rte_mbuf *pkt = pkts_rx[i];
-
+
/* Handle current packet */
if (app_pkt_handle(pkt, current_time) == DROP)
rte_pktmbuf_free(pkt);
pkts_tx[pkts_tx_len] = pkt;
pkts_tx_len ++;
}
-
+
/* Write packets from output buffer to NIC TX when full burst is available */
if (unlikely(pkts_tx_len == PKT_TX_BURST_MAX)) {
/* Write packet burst to NIC TX */
int ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, PKT_TX_BURST_MAX);
-
+
/* Free buffers for any packets not written successfully */
if (unlikely(ret < PKT_TX_BURST_MAX)) {
for ( ; ret < PKT_TX_BURST_MAX; ret ++) {
rte_pktmbuf_free(pkts_tx[ret]);
}
}
-
+
/* Empty the output buffer */
pkts_tx_len = 0;
}
static struct option lgopts[] = {
{NULL, 0, 0, 0}
};
- uint64_t port_mask, i, mask;
+ uint64_t port_mask, i, mask;
argvopt = argv;
print_usage(prgname);
return -1;
}
-
+
for (i = 0, mask = 1; i < 64; i ++, mask <<= 1){
if (mask & port_mask){
port_rx = i;
break;
}
}
-
+
if (port_mask != 0) {
printf("invalid port mask (more than 2 ports)\n");
print_usage(prgname);
return -1;
}
break;
-
+
default:
print_usage(prgname);
return -1;
}
int
-MAIN(int argc, char **argv)
+main(int argc, char **argv)
{
uint32_t lcore_id;
int ret;
argc -= ret;
argv += ret;
if (rte_lcore_count() != 1) {
- rte_exit(EXIT_FAILURE, "This application does not accept more than one core. "
+ rte_exit(EXIT_FAILURE, "This application does not accept more than one core. "
"Please adjust the \"-c COREMASK\" parameter accordingly.\n");
}
-
+
/* Application non-EAL arguments parse */
ret = parse_args(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid input arguments\n");
/* Buffer pool init */
- pool = rte_mempool_create("pool", NB_MBUF, MBUF_SIZE, MEMPOOL_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ pool = rte_pktmbuf_pool_create("pool", NB_MBUF, MEMPOOL_CACHE_SIZE,
+ 0, MBUF_DATA_SIZE, rte_socket_id());
if (pool == NULL)
rte_exit(EXIT_FAILURE, "Buffer pool creation error\n");
- /* PMD init */
- if (rte_pmd_init_all() < 0)
- rte_exit(EXIT_FAILURE, "PMD init error\n");
-
- if (rte_eal_pci_probe() < 0)
- rte_exit(EXIT_FAILURE, "PCI probe error\n");
-
/* NIC init */
ret = rte_eth_dev_configure(port_rx, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
- ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
-
- ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &tx_conf);
+
+ ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
- ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
- ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &tx_conf);
+ ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
ret = rte_eth_dev_start(port_rx);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);
-
+
ret = rte_eth_dev_start(port_tx);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_tx, ret);
rte_eth_promiscuous_enable(port_rx);
rte_eth_promiscuous_enable(port_tx);
-
+
/* App configuration */
app_configure_flow_table();