* reference CYCLES to be used to
* measure core busyness based on poll count
*/
-#define MIN_CYCLES 1500000ULL
-#define MAX_CYCLES 2500000ULL
+#define MIN_CYCLES 1500000ULL
+#define MAX_CYCLES 22000000ULL
/* (500ms) */
#define TELEMETRY_INTERVALS_PER_SEC 2
};
struct lcore_params *lcore_params = lcore_params_array_default;
-uint16_t nb_lcore_params = sizeof(lcore_params_array_default) /
- sizeof(lcore_params_array_default[0]);
+uint16_t nb_lcore_params = RTE_DIM(lcore_params_array_default);
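
Note: RTE_DIM() from <rte_common.h> is DPDK's element-count helper; it expands
to sizeof(a) / sizeof((a)[0]), so the replacement above is purely cosmetic.
A minimal sketch (the array below is invented for illustration and assumes a
DPDK build environment for the header):

    #include <stdio.h>
    #include <rte_common.h>    /* RTE_DIM() */

    static const unsigned int example_queues[] = { 0, 1, 2, 3 };

    int main(void)
    {
        /* prints "4 entries" */
        printf("%zu entries\n", RTE_DIM(example_queues));
        return 0;
    }
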
static struct rte_eth_conf port_conf = {
.rxmode = {
uint16_t port_dst;
uint16_t port_src;
uint8_t proto;
-} __attribute__((__packed__));
+} __rte_packed;
struct ipv6_5tuple {
uint8_t ip_dst[IPV6_ADDR_LEN];
uint16_t port_dst;
uint16_t port_src;
uint8_t proto;
-} __attribute__((__packed__));
+} __rte_packed;
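
Note: __rte_packed is the DPDK wrapper for the packed attribute (on GCC/Clang
it expands to __attribute__((__packed__))), so the struct layouts above are
unchanged; the macro just keeps compiler specifics out of the application. A
small sketch with a hypothetical key type, again assuming a DPDK build
environment:

    #include <stdio.h>
    #include <stdint.h>
    #include <rte_common.h>    /* __rte_packed */

    /* packed so the key can be hashed byte-for-byte, with no padding holes */
    struct demo_key {
        uint32_t ip;
        uint16_t port;
        uint8_t  proto;
    } __rte_packed;

    int main(void)
    {
        /* 7 bytes when packed; typically 8 with default alignment */
        printf("sizeof(struct demo_key) = %zu\n", sizeof(struct demo_key));
        return 0;
    }
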
struct ipv4_l3fwd_route {
struct ipv4_5tuple key;
#define L3FWD_HASH_ENTRIES 1024
-#define IPV4_L3FWD_NUM_ROUTES \
- (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
-
-#define IPV6_L3FWD_NUM_ROUTES \
- (sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
-
static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
{RTE_IPV4(8,1,1,0), 24, 7},
};
-#define IPV4_L3FWD_NUM_ROUTES \
- (sizeof(ipv4_l3fwd_route_array) / sizeof(ipv4_l3fwd_route_array[0]))
-
#define IPV4_L3FWD_LPM_MAX_RULES 1024
typedef struct rte_lpm lookup_struct_t;
-/* Freqency scale down timer callback */
+/* Frequency scale down timer callback */
static void
-power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
- __attribute__((unused)) void *arg)
+power_timer_cb(__rte_unused struct rte_timer *tim,
+ __rte_unused void *arg)
{
uint64_t hz;
float sleep_time_ratio;
port_id = ((uintptr_t)data) >> CHAR_BIT;
queue_id = ((uintptr_t)data) &
RTE_LEN2MASK(CHAR_BIT, uint8_t);
- rte_eth_dev_rx_intr_disable(port_id, queue_id);
RTE_LOG(INFO, L3FWD_POWER,
"lcore %u is waked up from rx interrupt on"
" port %d queue %d\n",
return 0;
}
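
Note: the callback argument packs the port number into the upper bits and the
queue into the low byte; RTE_LEN2MASK(CHAR_BIT, uint8_t) is just a 0xff mask
for that low byte. A standalone sketch of the encode/decode, assuming a DPDK
build environment for <rte_common.h>:

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>        /* CHAR_BIT */
    #include <rte_common.h>    /* RTE_LEN2MASK() */

    int main(void)
    {
        uint16_t port_id = 3, queue_id = 5;

        /* pack: port in the high bits, queue in the low byte */
        uintptr_t data = ((uintptr_t)port_id << CHAR_BIT) | queue_id;

        /* unpack, as the rx interrupt callback above does */
        uint16_t p = data >> CHAR_BIT;
        uint8_t  q = data & RTE_LEN2MASK(CHAR_BIT, uint8_t);

        printf("port %u queue %u\n", p, q);
        return 0;
    }
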
-static void turn_on_intr(struct lcore_conf *qconf)
+static void turn_on_off_intr(struct lcore_conf *qconf, bool on)
{
int i;
struct lcore_rx_queue *rx_queue;
queue_id = rx_queue->queue_id;
rte_spinlock_lock(&(locks[port_id]));
- rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ if (on)
+ rte_eth_dev_rx_intr_enable(port_id, queue_id);
+ else
+ rte_eth_dev_rx_intr_disable(port_id, queue_id);
rte_spinlock_unlock(&(locks[port_id]));
}
}
}
/* main processing loop */
static int
-main_telemetry_loop(__attribute__((unused)) void *dummy)
+main_telemetry_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned int lcore_id;
br = FULL;
} else if (diff_tsc > MIN_CYCLES &&
diff_tsc < MAX_CYCLES) {
- br = PARTIAL;
+ br = (diff_tsc * 100) / MAX_CYCLES;
} else {
br = ZERO;
}
}
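
Note: with this change the partially-busy case reports a linear percentage of
MAX_CYCLES instead of the flat PARTIAL constant, so the telemetry can tell a
lightly loaded core from one close to saturation. A self-contained sketch of
the arithmetic (thresholds copied from above; the sample cycle count and the
FULL/ZERO stand-ins are for illustration only):

    #include <stdio.h>
    #include <stdint.h>

    #define MIN_CYCLES  1500000ULL
    #define MAX_CYCLES 22000000ULL

    int main(void)
    {
        /* cycles spent on useful work during one telemetry interval */
        uint64_t diff_tsc = 11000000ULL;
        unsigned int br;

        if (diff_tsc >= MAX_CYCLES)
            br = 100;                                            /* FULL */
        else if (diff_tsc > MIN_CYCLES)
            br = (unsigned int)((diff_tsc * 100) / MAX_CYCLES);  /* 50 here */
        else
            br = 0;                                              /* ZERO */

        printf("busyness = %u%%\n", br);
        return 0;
    }
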
/* main processing loop */
static int
-main_empty_poll_loop(__attribute__((unused)) void *dummy)
+main_empty_poll_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned int lcore_id;
}
/* main processing loop */
static int
-main_loop(__attribute__((unused)) void *dummy)
+main_loop(__rte_unused void *dummy)
{
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
*/
rte_delay_us(lcore_idle_hint);
else {
- /* suspend until rx interrupt trigges */
+ /* suspend until rx interrupt triggers */
if (intr_en) {
- turn_on_intr(qconf);
+ turn_on_off_intr(qconf, 1);
sleep_until_rx_interrupt(
qconf->n_rx_queue);
+ turn_on_off_intr(qconf, 0);
/**
* start receiving packets immediately
*/
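
Note: the rx interrupt is now disarmed by the polling core itself after it
wakes up (the second turn_on_off_intr() call above), instead of inside the
interrupt event callback, which is why the rte_eth_dev_rx_intr_disable() call
was removed from the callback earlier in this patch. A single-queue sketch of
the pattern, with hypothetical port/queue arguments and an already configured
ethdev assumed; in l3fwd-power the blocking wait is sleep_until_rx_interrupt(),
built on rte_epoll_wait():

    #include <rte_ethdev.h>
    #include <rte_spinlock.h>

    static rte_spinlock_t lock = RTE_SPINLOCK_INITIALIZER;

    /* arm the rx interrupt, block until it fires, then disarm it again */
    static void
    wait_for_traffic(uint16_t port_id, uint16_t queue_id)
    {
        rte_spinlock_lock(&lock);
        rte_eth_dev_rx_intr_enable(port_id, queue_id);
        rte_spinlock_unlock(&lock);

        /* ... block here on the queue's interrupt fd ... */

        rte_spinlock_lock(&lock);
        rte_eth_dev_rx_intr_disable(port_id, queue_id);
        rte_spinlock_unlock(&lock);
    }
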
/* populate the ipv4 hash */
- for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
+ for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) {
ret = rte_hash_add_key (ipv4_l3fwd_lookup_struct[socketid],
(void *) &ipv4_l3fwd_route_array[i].key);
if (ret < 0) {
}
/* populate the ipv6 hash */
- for (i = 0; i < IPV6_L3FWD_NUM_ROUTES; i++) {
+ for (i = 0; i < RTE_DIM(ipv6_l3fwd_route_array); i++) {
ret = rte_hash_add_key (ipv6_l3fwd_lookup_struct[socketid],
(void *) &ipv6_l3fwd_route_array[i].key);
if (ret < 0) {
" on socket %d\n", socketid);
/* populate the LPM table */
- for (i = 0; i < IPV4_L3FWD_NUM_ROUTES; i++) {
+ for (i = 0; i < RTE_DIM(ipv4_l3fwd_route_array); i++) {
ret = rte_lpm_add(ipv4_l3fwd_lookup_struct[socketid],
ipv4_l3fwd_route_array[i].ip,
ipv4_l3fwd_route_array[i].depth,
uint8_t count, all_ports_up, print_flag = 0;
uint16_t portid;
struct rte_eth_link link;
+ int ret;
printf("\nChecking link status");
fflush(stdout);
if ((port_mask & (1 << portid)) == 0)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait(portid, &link);
+ ret = rte_eth_link_get_nowait(portid, &link);
+ if (ret < 0) {
+ all_ports_up = 0;
+ if (print_flag == 1)
+ printf("Port %u link get failed: %s\n",
+ portid, rte_strerror(-ret));
+ continue;
+ }
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
return ret;
}
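
Note: rte_eth_link_get_nowait() used to return void; it now returns a negative
errno-style value on failure, which is what the added check handles so that a
single misbehaving port no longer goes unnoticed during the link-status poll.
A sketch of the per-port check, assuming an initialized port:

    #include <stdio.h>
    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>     /* rte_strerror() */

    /* returns 1 if the port reports link up, 0 otherwise */
    static int
    port_link_is_up(uint16_t portid)
    {
        struct rte_eth_link link;
        int ret;

        memset(&link, 0, sizeof(link));
        ret = rte_eth_link_get_nowait(portid, &link);
        if (ret < 0) {
            printf("Port %u link get failed: %s\n",
                portid, rte_strerror(-ret));
            return 0;
        }
        return link.link_status != 0;
    }
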
static void
-update_telemetry(__attribute__((unused)) struct rte_timer *tim,
- __attribute__((unused)) void *arg)
+update_telemetry(__rte_unused struct rte_timer *tim,
+ __rte_unused void *arg)
{
unsigned int lcore_id = rte_lcore_id();
struct lcore_conf *qconf;
printf("Initializing port %d ... ", portid );
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
dev_rxq_num = dev_info.max_rx_queues;
dev_txq_num = dev_info.max_tx_queues;
/* If number of Rx queue is 0, no need to enable Rx interrupt */
if (nb_rx_queue == 0)
local_port_conf.intr_conf.rxq = 0;
- rte_eth_dev_info_get(portid, &dev_info);
+
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
local_port_conf.txmode.offloads |=
DEV_TX_OFFLOAD_MBUF_FAST_FREE;
"Cannot adjust number of descriptors: err=%d, port=%d\n",
ret, portid);
- rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+ ret = rte_eth_macaddr_get(portid, &ports_eth_addr[portid]);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot get MAC address: err=%d, port=%d\n",
+ ret, portid);
+
print_ethaddr(" Address:", &ports_eth_addr[portid]);
printf(", ");
/* init RX queues */
for(queue = 0; queue < qconf->n_rx_queue; ++queue) {
struct rte_eth_rxconf rxq_conf;
- struct rte_eth_dev *dev;
- struct rte_eth_conf *conf;
portid = qconf->rx_queue_list[queue].port_id;
queueid = qconf->rx_queue_list[queue].queue_id;
- dev = &rte_eth_devices[portid];
- conf = &dev->data->dev_conf;
if (numa_on)
socketid = \
printf("rxq=%d,%d,%d ", portid, queueid, socketid);
fflush(stdout);
- rte_eth_dev_info_get(portid, &dev_info);
+ ret = rte_eth_dev_info_get(portid, &dev_info);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Error during getting device (port %u) info: %s\n",
+ portid, strerror(-ret));
+
rxq_conf = dev_info.default_rxconf;
- rxq_conf.offloads = conf->rxmode.offloads;
+ rxq_conf.offloads = port_conf.rxmode.offloads;
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,
socketid, &rxq_conf,
pktmbuf_pool[socketid]);
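
Note: reaching into rte_eth_devices[] for dev->data->dev_conf relied on ethdev
internals that applications are not meant to touch; the offload flags the
application asked for are already in its own port_conf, so the rx queue setup
can take them from there. A sketch of the per-queue setup as a helper, with
hypothetical parameters standing in for the variables the example keeps in
lcore and port state:

    #include <stdlib.h>
    #include <string.h>
    #include <rte_ethdev.h>
    #include <rte_mempool.h>
    #include <rte_debug.h>     /* rte_exit() */

    static void
    setup_rx_queue(uint16_t portid, uint16_t queueid, uint16_t nb_rxd,
            unsigned int socketid, const struct rte_eth_conf *app_conf,
            struct rte_mempool *pool)
    {
        struct rte_eth_dev_info dev_info;
        struct rte_eth_rxconf rxq_conf;
        int ret;

        ret = rte_eth_dev_info_get(portid, &dev_info);
        if (ret != 0)
            rte_exit(EXIT_FAILURE, "port %u: dev_info_get: %s\n",
                portid, strerror(-ret));

        /* start from the driver defaults, then apply the app's offloads */
        rxq_conf = dev_info.default_rxconf;
        rxq_conf.offloads = app_conf->rxmode.offloads;

        ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd, socketid,
                &rxq_conf, pool);
        if (ret < 0)
            rte_exit(EXIT_FAILURE, "rx queue setup failed: err=%d\n", ret);
    }
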
* to itself through 2 cross-connected ports of the
* target machine.
*/
- if (promiscuous_on)
- rte_eth_promiscuous_enable(portid);
+ if (promiscuous_on) {
+ ret = rte_eth_promiscuous_enable(portid);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_promiscuous_enable: err=%s, port=%u\n",
+ rte_strerror(-ret), portid);
+ }
/* initialize spinlock for each port */
rte_spinlock_init(&(locks[portid]));
}