#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
-#include <rte_tailq.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_ip_frag.h>
-#include "main.h"
-
#define MAX_PKT_BURST 32
#define MAX_JUMBO_PKT_LEN 9600
-#define BUF_SIZE 2048
+#define BUF_SIZE RTE_MBUF_DEFAULT_DATAROOM
#define MBUF_SIZE \
(BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
static uint32_t max_flow_num = DEF_FLOW_NUM;
static uint32_t max_flow_ttl = DEF_FLOW_TTL;
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /**< Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /**< Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /**< Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /**< Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /**< Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /**< Default values of TX write-back threshold reg. */
-
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define NB_SOCKETS 8
},
};
-static const struct rte_eth_rxconf rx_conf = {
- .rx_thresh = {
- .pthresh = RX_PTHRESH,
- .hthresh = RX_HTHRESH,
- .wthresh = RX_WTHRESH,
- },
- .rx_free_thresh = 32,
-};
-
-static const struct rte_eth_txconf tx_conf = {
- .tx_thresh = {
- .pthresh = TX_PTHRESH,
- .hthresh = TX_HTHRESH,
- .wthresh = TX_WTHRESH,
- },
- .tx_free_thresh = 0, /* Use PMD default values */
- .tx_rs_thresh = 0, /* Use PMD default values */
- .txq_flags = 0x0,
-};
-
/*
* IPv4 forwarding table
*/
dst_port = portid;
/* if packet is IPv4 */
- if (m->ol_flags & (PKT_RX_IPV4_HDR)) {
+ if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
struct ipv4_hdr *ip_hdr;
uint32_t ip_dst;
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr);
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
}
eth_hdr->ether_type = rte_be_to_cpu_16(ETHER_TYPE_IPv4);
- }
- /* if packet is IPv6 */
- else if (m->ol_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) {
+ } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
+ /* if packet is IPv6 */
struct ipv6_extension_fragment *frag_hdr;
struct ipv6_hdr *ip_hdr;
dr = &qconf->death_row;
/* prepare mbuf: setup l2_len/l3_len. */
- m->pkt.vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->pkt.vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
static void
print_ethaddr(const char *name, const struct ether_addr *eth_addr)
{
- printf ("%s%02X:%02X:%02X:%02X:%02X:%02X", name,
- eth_addr->addr_bytes[0],
- eth_addr->addr_bytes[1],
- eth_addr->addr_bytes[2],
- eth_addr->addr_bytes[3],
- eth_addr->addr_bytes[4],
- eth_addr->addr_bytes[5]);
+	char buf[ETHER_ADDR_FMT_SIZE];
+	ether_format_addr(buf, sizeof(buf), eth_addr);
+	printf("%s%s", name, buf);
}
/* Check the link status of all ports in up to 9s, and print them finally */
n = RTE_MAX(max_flow_num, 2UL * MAX_PKT_BURST);
sz = sizeof (*mtb) + sizeof (mtb->m_table[0]) * n;
- if ((mtb = rte_zmalloc_socket(__func__, sz, CACHE_LINE_SIZE,
+ if ((mtb = rte_zmalloc_socket(__func__, sz, RTE_CACHE_LINE_SIZE,
socket)) == NULL) {
RTE_LOG(ERR, IP_RSMBL, "%s() for lcore: %u, port: %u "
"failed to allocate %zu bytes\n",
nb_mbuf = RTE_MAX(nb_mbuf, (uint32_t)NB_MBUF);
- rte_snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
+ snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
sizeof(struct rte_pktmbuf_pool_private),
if (socket_lpm[socket] == NULL) {
RTE_LOG(INFO, IP_RSMBL, "Creating LPM table on socket %i\n", socket);
- rte_snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
+ snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
lpm = rte_lpm_create(buf, socket, LPM_MAX_RULES, 0);
if (lpm == NULL) {
if (socket_lpm6[socket] == NULL) {
RTE_LOG(INFO, IP_RSMBL, "Creating LPM6 table on socket %i\n", socket);
-rte_snprintf(buf, sizeof(buf), "IP_RSMBL_LPM_%i", socket);
-lpm6 = rte_lpm6_create("IP_RSMBL_LPM6", socket, &lpm6_config);
+snprintf(buf, sizeof(buf), "IP_RSMBL_LPM6_%i", socket);
+lpm6 = rte_lpm6_create(buf, socket, &lpm6_config);
if (lpm6 == NULL) {
}
int
-MAIN(int argc, char **argv)
+main(int argc, char **argv)
{
struct lcore_queue_conf *qconf;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_txconf *txconf;
struct rx_queue *rxq;
int ret, socket;
unsigned nb_ports;
if (ret < 0)
rte_exit(EXIT_FAILURE, "Invalid IP reassembly parameters\n");
- if (rte_eal_pci_probe() < 0)
- rte_exit(EXIT_FAILURE, "Cannot probe PCI\n");
-
nb_ports = rte_eth_dev_count();
if (nb_ports > RTE_MAX_ETHPORTS)
nb_ports = RTE_MAX_ETHPORTS;
/* init one RX queue */
ret = rte_eth_rx_queue_setup(portid, 0, nb_rxd,
- socket, &rx_conf,
+ socket, NULL,
rxq->pool);
if (ret < 0) {
printf("\n");
printf("txq=%u,%d,%d ", lcore_id, queueid, socket);
fflush(stdout);
+
+ rte_eth_dev_info_get(portid, &dev_info);
+ txconf = &dev_info.default_txconf;
+ txconf->txq_flags = 0;
+
ret = rte_eth_tx_queue_setup(portid, queueid, nb_txd,
- socket, &tx_conf);
+ socket, txconf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
"port=%d\n", ret, portid);