/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_common.h>
#include <rte_eal.h>
+#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
* Buffer pool configuration
*
***/
-#define MBUF_SIZE (2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
#define NB_MBUF 8192
#define MEMPOOL_CACHE_SIZE 256
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = 8, /* RX prefetch threshold reg */
- .hthresh = 8, /* RX host threshold reg */
- .wthresh = 4, /* RX write-back threshold reg */
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = 36, /* TX prefetch threshold reg */
- .hthresh = 0, /* TX host threshold reg */
- .wthresh = 0, /* TX write-back threshold reg */
- },
- .tx_free_thresh = 0,
- .tx_rs_thresh = 0,
- .txq_flags = 0x0,
-};
-
#define NIC_RX_QUEUE_DESC 128
#define NIC_TX_QUEUE_DESC 512
static uint8_t port_rx;
static uint8_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
-static struct rte_mbuf *pkts_tx[PKT_TX_BURST_MAX];
-static uint16_t pkts_tx_len = 0;
-
+struct rte_eth_dev_tx_buffer *tx_buffer;
struct rte_meter_srtcm_params app_srtcm_params[] = {
{.cir = 1000000 * 46, .cbs = 2048, .ebs = 2048},
FLOW_METER app_flows[APP_FLOWS_MAX];
+/*
+ * Configure one meter object per flow, assigning entries from the
+ * PARAMS table to flows round-robin.
+ *
+ * Returns 0 on success, or the first non-zero error code returned by
+ * FUNC_CONFIG (presumably rte_meter_srtcm_config()/rte_meter_trtcm_config()
+ * depending on build-time meter selection -- confirm against the
+ * surrounding #defines) so the caller can abort startup on bad params.
+ */
-static void
+static int
app_configure_flow_table(void)
{
uint32_t i, j;
+ int ret;
- for (i = 0, j = 0; i < APP_FLOWS_MAX; i ++, j = (j + 1) % RTE_DIM(PARAMS)){
- FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ /* j wraps around RTE_DIM(PARAMS): params are reused cyclically
+ * when there are more flows than parameter sets. */
+ for (i = 0, j = 0; i < APP_FLOWS_MAX;
+ i ++, j = (j + 1) % RTE_DIM(PARAMS)) {
+ ret = FUNC_CONFIG(&app_flows[i], &PARAMS[j]);
+ /* Stop at the first flow that fails to configure and
+ * propagate its error code to the caller. */
+ if (ret)
+ return ret;
}
+
+ return 0;
}
static inline void
current_time = rte_rdtsc();
time_diff = current_time - last_time;
if (unlikely(time_diff > TIME_TX_DRAIN)) {
- int ret;
-
- if (pkts_tx_len == 0) {
- last_time = current_time;
-
- continue;
- }
-
- /* Write packet burst to NIC TX */
- ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, pkts_tx_len);
-
- /* Free buffers for any packets not written successfully */
- if (unlikely(ret < pkts_tx_len)) {
- for ( ; ret < pkts_tx_len; ret ++) {
- rte_pktmbuf_free(pkts_tx[ret]);
- }
- }
-
- /* Empty the output buffer */
- pkts_tx_len = 0;
-
+ /* Flush tx buffer */
+ rte_eth_tx_buffer_flush(port_tx, NIC_TX_QUEUE, tx_buffer);
last_time = current_time;
}
/* Handle current packet */
if (app_pkt_handle(pkt, current_time) == DROP)
rte_pktmbuf_free(pkt);
- else {
- pkts_tx[pkts_tx_len] = pkt;
- pkts_tx_len ++;
- }
-
- /* Write packets from output buffer to NIC TX when full burst is available */
- if (unlikely(pkts_tx_len == PKT_TX_BURST_MAX)) {
- /* Write packet burst to NIC TX */
- int ret = rte_eth_tx_burst(port_tx, NIC_TX_QUEUE, pkts_tx, PKT_TX_BURST_MAX);
-
- /* Free buffers for any packets not written successfully */
- if (unlikely(ret < PKT_TX_BURST_MAX)) {
- for ( ; ret < PKT_TX_BURST_MAX; ret ++) {
- rte_pktmbuf_free(pkts_tx[ret]);
- }
- }
-
- /* Empty the output buffer */
- pkts_tx_len = 0;
- }
+ else
+ rte_eth_tx_buffer(port_tx, NIC_TX_QUEUE, tx_buffer, pkt);
}
}
}
}
int
-MAIN(int argc, char **argv)
+main(int argc, char **argv)
{
uint32_t lcore_id;
int ret;
rte_exit(EXIT_FAILURE, "Invalid input arguments\n");
/* Buffer pool init */
- pool = rte_mempool_create("pool", NB_MBUF, MBUF_SIZE, MEMPOOL_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private), rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ pool = rte_pktmbuf_pool_create("pool", NB_MBUF, MEMPOOL_CACHE_SIZE,
+ 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
if (pool == NULL)
rte_exit(EXIT_FAILURE, "Buffer pool creation error\n");
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_rx, ret);
- ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_rx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_rx, ret);
- ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_rx), &tx_conf);
+ ret = rte_eth_tx_queue_setup(port_rx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_rx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_rx, ret);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d configuration error (%d)\n", port_tx, ret);
- ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &rx_conf, pool);
+ ret = rte_eth_rx_queue_setup(port_tx, NIC_RX_QUEUE, NIC_RX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL, pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d RX queue setup error (%d)\n", port_tx, ret);
- ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC, rte_eth_dev_socket_id(port_tx), &tx_conf);
+ ret = rte_eth_tx_queue_setup(port_tx, NIC_TX_QUEUE, NIC_TX_QUEUE_DESC,
+ rte_eth_dev_socket_id(port_tx),
+ NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d TX queue setup error (%d)\n", port_tx, ret);
+ tx_buffer = rte_zmalloc_socket("tx_buffer",
+ RTE_ETH_TX_BUFFER_SIZE(PKT_TX_BURST_MAX), 0,
+ rte_eth_dev_socket_id(port_tx));
+ if (tx_buffer == NULL)
+ rte_exit(EXIT_FAILURE, "Port %d TX buffer allocation error\n",
+ port_tx);
+
+ rte_eth_tx_buffer_init(tx_buffer, PKT_TX_BURST_MAX);
+
ret = rte_eth_dev_start(port_rx);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Port %d start error (%d)\n", port_rx, ret);
rte_eth_promiscuous_enable(port_tx);
/* App configuration */
- app_configure_flow_table();
+ ret = app_configure_flow_table();
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid configure flow table\n");
/* Launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);