- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
.header_split = 0, /**< Header Split disabled */
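/*
 * For context (not part of the patch): the fields above sit inside the
 * rxmode member of the port configuration. A minimal sketch of the
 * surrounding initializer follows; only max_rx_pkt_len, split_hdr_size and
 * header_split come from this excerpt, the remaining fields and values are
 * assumptions typical of the l3fwd examples of that DPDK generation.
 */
static const struct rte_eth_conf port_conf_sketch = {
	.rxmode = {
		.mq_mode        = ETH_MQ_RX_RSS,
		.max_rx_pkt_len = ETHER_MAX_LEN,
		.split_hdr_size = 0,
		.header_split   = 0, /**< Header Split disabled */
		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
		.jumbo_frame    = 0, /**< Jumbo frames disabled */
		.hw_strip_crc   = 0, /**< CRC stripping disabled */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_hf  = ETH_RSS_IP,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
};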
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
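/*
 * A minimal sketch (not part of the patch) of what takes the place of the
 * static rx_conf/tx_conf removed above: queue setup leans on the PMD's own
 * defaults, either by passing NULL for the per-queue config or, on releases
 * where struct rte_eth_dev_info exposes default_rxconf, by copying and
 * adjusting it. Function and variable names here are illustrative.
 */
#include <rte_ethdev.h>
#include <rte_mempool.h>

static int
setup_queue_pair_with_pmd_defaults(uint8_t portid, uint16_t nb_rxd,
		uint16_t nb_txd, unsigned int socketid, struct rte_mempool *mp)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;
	int ret;

	rte_eth_dev_info_get(portid, &dev_info);

	/* start from the PMD-chosen RX thresholds instead of hard-coding them */
	rxq_conf = dev_info.default_rxconf;
	ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd, socketid, &rxq_conf, mp);
	if (ret < 0)
		return ret;

	/* NULL asks the PMD for its default TX config as well */
	return rte_eth_tx_queue_setup(portid, 0, nb_txd, socketid, NULL);
}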
uint64_t nb_rx_processed;
/* total iterations looped recently */
uint64_t nb_iteration_looped;
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
/* accumulate total execution time in us when callback is invoked */
sleep_time_ratio = (float)(stats[lcore_id].sleep_time) /
(float)SCALING_PERIOD;
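/*
 * Illustrative sketch of how the ratio computed above drives frequency
 * scaling: SCALING_PERIOD is the timer interval in microseconds and
 * sleep_time is how long the lcore slept within it. The threshold name and
 * value below are assumptions, not taken from this patch.
 */
#include <stdint.h>
#include <rte_power.h>

#define SLEEP_RATIO_SCALE_DOWN 0.25f /* illustrative threshold */

static void
adjust_freq_from_sleep_ratio(unsigned int lcore_id, uint64_t sleep_time_us,
		uint64_t scaling_period_us)
{
	float ratio = (float)sleep_time_us / (float)scaling_period_us;

	if (ratio >= SLEEP_RATIO_SCALE_DOWN)
		rte_power_freq_min(lcore_id);  /* mostly idle: drop to lowest gear */
	else if (ratio > 0.0f)
		rte_power_freq_down(lcore_id); /* partly idle: one gear down */
}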
{
/**
* HW Rx queue size is 128 by default, Rx burst read at maximum 32 entries
* per iteration
*/
#define FREQ_GEAR1_RX_PACKET_THRESHOLD MAX_PKT_BURST
#define FREQ_UP_TREND1_ACC 1
#define FREQ_UP_TREND2_ACC 100
#define FREQ_UP_THRESHOLD 10000
- /**
- * there are received packets to process, staying at C0 state while
- * trying to scale up frequency depending on how many entries on h/w
- * queue. Determine frequency scaleup trend based on availiable entries
- * on Rx queues.
- */
- if (rx_ring_length > FREQ_GEAR3_RX_PACKET_THRESHOLD) {
+ if (likely(rte_eth_rx_descriptor_done(port_id, queue_id,
+ FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)) {
- /**
- * get availiable descriptor number via MMIO read is costly,
- * so only do it when recent poll returns maximum number.
- */
- if (nb_rx >= MAX_PKT_BURST)
- rx_ring_length = rte_eth_rx_queue_count(portid, queueid);
- else
- rx_ring_length = 0;
-
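/*
 * Sketch of the heuristic the hunk above moves to: rather than reading the
 * full queue occupancy with rte_eth_rx_queue_count() (a costly MMIO read),
 * probe whether the descriptor at a given backlog depth has already been
 * written back. FREQ_GEAR3_RX_PACKET_THRESHOLD and the trend accumulators
 * come from the excerpt; FREQ_GEAR2_RX_PACKET_THRESHOLD, FREQ_CURRENT and
 * the exact trend handling are assumptions about the surrounding code.
 */
#include <rte_ethdev.h>

static enum freq_scale_hint_t
scaleup_hint_sketch(uint8_t port_id, uint16_t queue_id, uint32_t *trend)
{
	/* a deep backlog already completed by hardware: go straight to max */
	if (rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR3_RX_PACKET_THRESHOLD) > 0)
		return FREQ_HIGHEST;

	/* shallower backlogs feed a per-queue upward trend counter */
	if (rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR2_RX_PACKET_THRESHOLD) > 0)
		*trend += FREQ_UP_TREND2_ACC;
	else if (rte_eth_rx_descriptor_done(port_id, queue_id,
			FREQ_GEAR1_RX_PACKET_THRESHOLD) > 0)
		*trend += FREQ_UP_TREND1_ACC;

	/* a sustained trend asks for one gear up */
	if (*trend > FREQ_UP_THRESHOLD) {
		*trend = 0;
		return FREQ_HIGHER;
	}
	return FREQ_CURRENT;
}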
if (lcore_scaleup_hint == FREQ_HIGHEST)
rte_power_freq_max(lcore_id);
else if (lcore_scaleup_hint == FREQ_HIGHER)
- rte_snprintf(s, sizeof(s), "%.*s", size, p);
+ snprintf(s, sizeof(s), "%.*s", size, p);
if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') !=
_NUM_FLD)
return -1;
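/*
 * Sketch of the surrounding parse step, assuming the usual l3fwd-style
 * "--config (port,queue,lcore)" tuples. _NUM_FLD and the FLD_* names are
 * illustrative; only the snprintf/rte_strsplit pattern comes from the
 * excerpt.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <rte_string_fns.h>

enum { FLD_PORT = 0, FLD_QUEUE, FLD_LCORE, _NUM_FLD };

static int
parse_config_tuple(const char *p, size_t size, unsigned long out[_NUM_FLD])
{
	char s[256];
	char *str_fld[_NUM_FLD];
	char *end;
	int i;

	if (size >= sizeof(s))
		return -1;
	snprintf(s, sizeof(s), "%.*s", (int)size, p);

	if (rte_strsplit(s, sizeof(s), str_fld, _NUM_FLD, ',') != _NUM_FLD)
		return -1;

	for (i = 0; i < _NUM_FLD; i++) {
		errno = 0;
		out[i] = strtoul(str_fld[i], &end, 0);
		if (errno != 0 || end == str_fld[i])
			return -1;
	}
	return 0;
}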
"max packet length to %u\n",
(unsigned int)port_conf.rxmode.max_rx_pkt_len);
}
"max packet length to %u\n",
(unsigned int)port_conf.rxmode.max_rx_pkt_len);
}
- printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
- eth_addr->addr_bytes[0],
- eth_addr->addr_bytes[1],
- eth_addr->addr_bytes[2],
- eth_addr->addr_bytes[3],
- eth_addr->addr_bytes[4],
- eth_addr->addr_bytes[5]);
+ char buf[ETHER_ADDR_FMT_SIZE];
+ ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+ printf("%s%s", name, buf);
- rte_snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
+ snprintf(s, sizeof(s), "ipv4_l3fwd_hash_%d", socketid);
ipv4_l3fwd_hash_params.name = s;
ipv4_l3fwd_hash_params.socket_id = socketid;
ipv4_l3fwd_lookup_struct[socketid] =
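/*
 * Sketch of how the hash created above is typically populated: each route
 * key is added once and the returned slot index is used to store the
 * next-hop port. Array and parameter names here are illustrative.
 */
#include <stdint.h>
#include <rte_hash.h>

static int
add_ipv4_route_sketch(struct rte_hash *h, const void *key,
		uint8_t next_hop, uint8_t out_if[])
{
	/* rte_hash_add_key() returns the slot index, or a negative error */
	int32_t pos = rte_hash_add_key(h, key);

	if (pos < 0)
		return -1;
	out_if[pos] = next_hop; /* remember the egress port for this flow */
	return 0;
}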
- rte_snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
+ snprintf(s, sizeof(s), "ipv6_l3fwd_hash_%d", socketid);
ipv6_l3fwd_hash_params.name = s;
ipv6_l3fwd_hash_params.socket_id = socketid;
ipv6_l3fwd_lookup_struct[socketid] =
- rte_snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
+ snprintf(s, sizeof(s), "IPV4_L3FWD_LPM_%d", socketid);
ipv4_l3fwd_lookup_struct[socketid] = rte_lpm_create(s, socketid,
IPV4_L3FWD_LPM_MAX_RULES, 0);
if (ipv4_l3fwd_lookup_struct[socketid] == NULL)
- rte_snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
+ snprintf(s, sizeof(s), "mbuf_pool_%d", socketid);
- rte_mempool_create(s, nb_mbuf,
- MBUF_SIZE, MEMPOOL_CACHE_SIZE,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- socketid, 0);
+ rte_pktmbuf_pool_create(s, nb_mbuf,
+ MEMPOOL_CACHE_SIZE, 0, MBUF_DATA_SIZE,
+ socketid);
if (pktmbuf_pool[socketid] == NULL)
rte_exit(EXIT_FAILURE,
"Cannot init mbuf pool on socket %d\n",
- /* init driver(s) */
- if (rte_pmd_init_all() < 0)
- rte_exit(EXIT_FAILURE, "Cannot init pmd\n");
-
- if (rte_eal_pci_probe() < 0)
- rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
-
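/*
 * Sketch of why the two calls above can go away: rte_eal_init() now
 * registers the linked-in PMDs and probes the bus itself, so the example
 * only needs EAL init before counting ports. Error message text is
 * illustrative.
 */
#include <stdlib.h>
#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_common.h>
#include <rte_debug.h>

static uint8_t
init_eal_and_count_ports(int argc, char **argv)
{
	int ret = rte_eal_init(argc, argv);

	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Invalid EAL parameters\n");

	/* PMD registration and PCI probing already happened inside
	 * rte_eal_init(), so the device count is final here. */
	return rte_eth_dev_count();
}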
nb_ports = rte_eth_dev_count();
if (nb_ports > RTE_MAX_ETHPORTS)
nb_ports = RTE_MAX_ETHPORTS;
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout);
printf("txq=%u,%d,%d ", lcore_id, queueid, socketid);
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
fflush(stdout);
ret = rte_eth_rx_queue_setup(portid, queueid, nb_rxd,