static char *socket_files;
static int nb_sockets;
-/* empty vmdq configuration structure. Filled in programatically */
+/* empty VMDq configuration structure. Filled in programmatically */
static struct rte_eth_conf vmdq_conf_default = {
.rxmode = {
.mq_mode = RTE_ETH_MQ_RX_VMDQ_ONLY,
/*
-		 * VLAN strip is necessary for 1G NIC such as I350,
-		 * this fixes bug of ipv4 forwarding in guest can't
-		 * forward pakets from one virtio dev to another virtio dev.
+		 * VLAN strip is necessary for 1G NICs such as the I350;
+		 * this fixes a bug where IPv4 forwarding in the guest can't
+		 * forward packets from one virtio dev to another virtio dev.
*/
.offloads = RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
},
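+/* TSC cycles in BURST_TX_DRAIN_US: the ticks-per-microsecond rate is
+ * rounded up so the Tx drain never fires early. */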
#define MBUF_TABLE_DRAIN_TSC ((rte_get_tsc_hz() + US_PER_S - 1) \
/ US_PER_S * BURST_TX_DRAIN_US)
-#define VLAN_HLEN 4
static inline int
open_dma(const char *value)
tx_rings = (uint16_t)rte_lcore_count();
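+	/*
+	 * Mergeable Rx buffers let the guest receive packets that span
+	 * multiple receive buffers, so raise the MTU: use the device's
+	 * reported max_mtu when it is the tighter limit, otherwise fall
+	 * back to MAX_MTU.
+	 */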
+ if (mergeable) {
+ if (dev_info.max_mtu != UINT16_MAX && dev_info.max_rx_pktlen > dev_info.max_mtu)
+ vmdq_conf_default.rxmode.mtu = dev_info.max_mtu;
+ else
+ vmdq_conf_default.rxmode.mtu = MAX_MTU;
+ }
+
/* Get port configuration. */
retval = get_eth_conf(&port_conf, num_devices);
if (retval < 0)
" --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
" --vm2vm [0|1|2]: disable/software(default)/hardware vm2vm comms\n"
- " --rx-retry [0|1]: disable/enable(default) retries on rx. Enable retry if destintation queue is full\n"
+ " --rx-retry [0|1]: disable/enable(default) retries on Rx. Enable retry if destination queue is full\n"
" --rx-retry-delay [0-N]: timeout(in usecond) between retries on RX. This makes effect only if retries on rx enabled\n"
" --rx-retry-num [0-N]: the number of retries on rx. This makes effect only if retries on rx enabled\n"
" --mergeable [0|1]: disable(default)/enable RX mergeable buffers\n"
return -1;
}
mergeable = !!ret;
- if (ret)
- vmdq_conf_default.rxmode.mtu = MAX_MTU;
break;
case OPT_STATS_NUM:
-	 * by minus length of vlan tag, so need restore
-	 * the packet length by plus it.
+	 * by the length of the VLAN tag, so restore
+	 * the packet length by adding it back.
*/
- *offset = VLAN_HLEN;
+ *offset = RTE_VLAN_HLEN;
*vlan_tag = vlan_tags[vdev->vid];
RTE_LOG_DP(DEBUG, VHOST_DATA,
tcp_hdr = rte_pktmbuf_mtod_offset(m, struct rte_tcp_hdr *,
m->l2_len + m->l3_len);
- m->ol_flags |= PKT_TX_TCP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
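+	/* For TSO, tcp_hdr->cksum must be seeded with the pseudo-header
+	 * checksum; the NIC computes the full checksum per segment. */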
if ((ptype & RTE_PTYPE_L3_MASK) == RTE_PTYPE_L3_IPV4) {
- m->ol_flags |= PKT_TX_IPV4;
- m->ol_flags |= PKT_TX_IP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IP_CKSUM;
ipv4_hdr = l3_hdr;
ipv4_hdr->hdr_checksum = 0;
tcp_hdr->cksum = rte_ipv4_phdr_cksum(l3_hdr, m->ol_flags);
} else { /* assume ethertype == RTE_ETHER_TYPE_IPV6 */
- m->ol_flags |= PKT_TX_IPV6;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
tcp_hdr->cksum = rte_ipv6_phdr_cksum(l3_hdr, m->ol_flags);
}
}
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags |= PKT_TX_VLAN_PKT;
+ m->ol_flags |= RTE_MBUF_F_TX_VLAN;
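+		/* The frame carries no VLAN header; have the NIC insert the
+		 * tag on transmit from m->vlan_tci (set below). */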
/*
* Find the right seg to adjust the data len when offset is
m->vlan_tci = vlan_tag;
}
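+	/* Packets coalesced by LRO on Rx can exceed the egress MTU, so
+	 * request TSO to have them re-segmented on transmit. */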
- if (m->ol_flags & PKT_RX_LRO)
+ if (m->ol_flags & RTE_MBUF_F_RX_LRO)
virtio_tx_offload(m);
tx_q->m_table[tx_q->len++] = m;
struct vhost_dev *vdev;
struct mbuf_table *tx_q;
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started\n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
tx_q = &lcore_tx_queue[lcore_id];
for (i = 0; i < rte_lcore_count(); i++) {
/*
* Remove a device from the specific data core linked list and from the
- * main linked list. Synchonization occurs through the use of the
+ * main linked list. Synchronization occurs through the use of the
* lcore dev_removal_flag. Device is made volatile here to avoid re-ordering
* of dev->remove=1 which can cause an infinite loop in the rte_pause loop.
*/
- * These callback allow devices to be added to the data core when configuration
- * has been fully complete.
+ * These callbacks allow devices to be added to the data core once configuration
+ * has fully completed.
*/
-static const struct vhost_device_ops virtio_net_device_ops =
+static const struct rte_vhost_device_ops virtio_net_device_ops =
{
.new_device = new_device,
.destroy_device = destroy_device,