RTE_TUNNEL_FILTER_OMAC_TENID_IMAC,};
/* Options for configuring ethernet port */
-static const struct rte_eth_conf port_conf = {
+static struct rte_eth_conf port_conf = {
.rxmode = {
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
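+ /* Use the unified rxmode.offloads API; the legacy bit-field flags are ignored. */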
+ .ignore_offload_bitfield = 1,
+ .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
},
.txmode = {
.mq_mode = ETH_MQ_TX_NONE,
+ .offloads = (DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM |
+ DEV_TX_OFFLOAD_SCTP_CKSUM |
+ DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_TCP_TSO |
+ DEV_TX_OFFLOAD_MULTI_SEGS |
+ DEV_TX_OFFLOAD_VXLAN_TNL_TSO),
},
};
struct rte_eth_rxconf *rxconf;
struct rte_eth_txconf *txconf;
struct vxlan_conf *pconf = &vxdev;
+ struct rte_eth_conf local_port_conf = port_conf;
pconf->dst_port = udp_port;
rxconf = &dev_info.default_rxconf;
txconf = &dev_info.default_txconf;
- txconf->txq_flags = 0;
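+ /* ETH_TXQ_FLAGS_IGNORE tells the PMD to use txconf->offloads instead of the legacy txq_flags. */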
+ txconf->txq_flags = ETH_TXQ_FLAGS_IGNORE;
if (port >= rte_eth_dev_count())
return -1;
rx_rings = nb_devices;
-
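+ /* Request fast mbuf free only when the PMD advertises the capability. */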
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
+ local_port_conf.txmode.offloads |=
+ DEV_TX_OFFLOAD_MBUF_FAST_FREE;
/* Configure ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings,
+ &local_port_conf);
if (retval != 0)
return retval;
/* Setup the queues. */
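+ /* Carry the port-level Rx offloads into the per-queue Rx configuration. */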
+ rxconf->offloads = local_port_conf.rxmode.offloads;
for (q = 0; q < rx_rings; q++) {
retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
rte_eth_dev_socket_id(port),
rxconf, mbuf_pool);
if (retval < 0)
return retval;
}
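+ /* Carry the port-level Tx offloads into the per-queue Tx configuration. */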
+ txconf->offloads = local_port_conf.txmode.offloads;
for (q = 0; q < tx_rings; q++) {
retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
rte_eth_dev_socket_id(port),