/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_virtio_net.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
#include "main.h"
-#define MAX_QUEUES 512
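+/* Default to 128 queues per port; may be overridden at build time. */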
+#ifndef MAX_QUEUES
+#define MAX_QUEUES 128
+#endif
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
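+/* Mbufs needed per port: one per RX descriptor on every queue, plus,
+ * per switching core, a burst in flight, a TX ring's worth of packets
+ * and a mempool cache, with one spare cache as headroom. */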
#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
(num_switching_cores*MAX_PKT_BURST) + \
(num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
- (num_switching_cores*MBUF_CACHE_SIZE))
+ ((num_switching_cores+1)*MBUF_CACHE_SIZE))
#define MBUF_CACHE_SIZE 128
#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
static uint32_t enable_stats = 0;
/* Enable retries on RX. */
static uint32_t enable_retry = 1;
+
+/* TX checksum offload: disabled by default, enable with --tx-csum 1. */
+static uint32_t enable_tx_csum;
+
+/* TSO offload: disabled by default, enable with --tso 1. */
+static uint32_t enable_tso;
+
/* Specify timeout (in microseconds) between retries on RX. */
static uint32_t burst_rx_delay_time = BURST_RX_WAIT_US;
/* Specify the number of retries on RX. */
__be16 h_vlan_encapsulated_proto;
};
-/* IPv4 Header */
-struct ipv4_hdr {
- uint8_t version_ihl; /**< version and header length */
- uint8_t type_of_service; /**< type of service */
- uint16_t total_length; /**< length of packet */
- uint16_t packet_id; /**< packet ID */
- uint16_t fragment_offset; /**< fragmentation offset */
- uint8_t time_to_live; /**< time to live */
- uint8_t next_proto_id; /**< protocol ID */
- uint16_t hdr_checksum; /**< header checksum */
- uint32_t src_addr; /**< source address */
- uint32_t dst_addr; /**< destination address */
-} __attribute__((__packed__));
-
/* Header lengths. */
#define VLAN_HLEN 4
#define VLAN_ETH_HLEN 18
if (port >= rte_eth_dev_count()) return -1;
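+	/*
+	 * When the host application does not handle TX checksum or TSO
+	 * itself, the matching virtio features must not be negotiated,
+	 * so the guest computes checksums and segments packets in
+	 * software.
+	 */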
+ if (enable_tx_csum == 0)
+ rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_CSUM);
+
+ if (enable_tso == 0) {
+ rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO4);
+ rte_vhost_feature_disable(1ULL << VIRTIO_NET_F_HOST_TSO6);
+ }
+
rx_rings = (uint16_t)dev_info.max_rx_queues;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
" --rx-desc-num [0-N]: the number of descriptors on rx, "
"used only when zero copy is enabled.\n"
" --tx-desc-num [0-N]: the number of descriptors on tx, "
- "used only when zero copy is enabled.\n",
+ "used only when zero copy is enabled.\n"
+ " --tx-csum [0|1] disable/enable TX checksum offload.\n"
+ " --tso [0|1] disable/enable TCP segment offload.\n",
prgname);
}
{"zero-copy", required_argument, NULL, 0},
{"rx-desc-num", required_argument, NULL, 0},
{"tx-desc-num", required_argument, NULL, 0},
+ {"tx-csum", required_argument, NULL, 0},
+ {"tso", required_argument, NULL, 0},
{NULL, 0, 0, 0},
};
}
}
+			/* Enable/disable TX checksum offload. */
+			if (!strncmp(long_option[option_index].name, "tx-csum", MAX_LONG_OPT_SZ)) {
+				ret = parse_num_opt(optarg, 1);
+				if (ret == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tx-csum [0|1]\n");
+					us_vhost_usage(prgname);
+					return -1;
+				}
+				enable_tx_csum = ret;
+			}
+
+			/* Enable/disable TSO offload. */
+			if (!strncmp(long_option[option_index].name, "tso", MAX_LONG_OPT_SZ)) {
+				ret = parse_num_opt(optarg, 1);
+				if (ret == -1) {
+					RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for tso [0|1]\n");
+					us_vhost_usage(prgname);
+					return -1;
+				}
+				enable_tso = ret;
+			}
+
/* Specify the retry delay time (in microseconds) on RX. */
if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
static inline int __attribute__((always_inline))
ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
{
- return (((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0);
+ return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
}
/*
rte_atomic64_add(
&dev_statistics[tdev->device_fh].rx_atomic,
ret);
- dev_statistics[tdev->device_fh].tx_total++;
- dev_statistics[tdev->device_fh].tx += ret;
+ dev_statistics[dev->device_fh].tx_total++;
+ dev_statistics[dev->device_fh].tx += ret;
}
}
return 0;
}
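+/*
+ * Return the IPv4/IPv6 pseudo-header checksum that hardware expects
+ * to find in the TCP checksum field when checksum/TSO offload is used.
+ */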
+static uint16_t
+get_psd_sum(void *l3_hdr, uint64_t ol_flags)
+{
+ if (ol_flags & PKT_TX_IPV4)
+ return rte_ipv4_phdr_cksum(l3_hdr, ol_flags);
+ else /* assume ethertype == ETHER_TYPE_IPv6 */
+ return rte_ipv6_phdr_cksum(l3_hdr, ol_flags);
+}
+
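+/*
+ * Prepare a packet from the guest for TSO on the physical port:
+ * request IPv4 header checksum offload and seed the TCP checksum
+ * with the pseudo-header sum required by the NIC.
+ */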
+static void
+virtio_tx_offload(struct rte_mbuf *m)
+{
+ void *l3_hdr;
+ struct ipv4_hdr *ipv4_hdr = NULL;
+ struct tcp_hdr *tcp_hdr = NULL;
+ struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
+
+ l3_hdr = (char *)eth_hdr + m->l2_len;
+
+ if (m->ol_flags & PKT_TX_IPV4) {
+ ipv4_hdr = l3_hdr;
+ ipv4_hdr->hdr_checksum = 0;
+ m->ol_flags |= PKT_TX_IP_CKSUM;
+ }
+
+ tcp_hdr = (struct tcp_hdr *)((char *)l3_hdr + m->l3_len);
+ tcp_hdr->cksum = get_psd_sum(l3_hdr, m->ol_flags);
+}
+
/*
* This function routes the TX packet to the correct interface. This may be a local device
* or the physical port.
(vh->vlan_tci != vlan_tag_be))
vh->vlan_tci = vlan_tag_be;
} else {
- m->ol_flags = PKT_TX_VLAN_PKT;
+ m->ol_flags |= PKT_TX_VLAN_PKT;
/*
* Find the right seg to adjust the data len when offset is
m->vlan_tci = vlan_tag;
}
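+	/* The guest requested TSO: set up the offload fields before TX. */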
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ virtio_tx_offload(m);
+
tx_q->m_table[len] = m;
len++;
if (enable_stats) {
rte_pktmbuf_free(pkts_burst[--tx_count]);
}
}
- while (tx_count)
- virtio_tx_route(vdev, pkts_burst[--tx_count], (uint16_t)dev->device_fh);
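+	/* Route the remaining packets, tagged with this device's VLAN ID. */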
+ for (i = 0; i < tx_count; ++i) {
+ virtio_tx_route(vdev, pkts_burst[i],
+ vlan_tags[(uint16_t)dev->device_fh]);
+ }
}
/*move to the next device in the list*/
uint64_t buff_addr, phys_addr;
struct vhost_virtqueue *vq;
struct vring_desc *desc;
- struct rte_mbuf *mbuf = NULL;
+ void *obj = NULL;
+ struct rte_mbuf *mbuf;
struct vpool *vpool;
hpa_type addr_type;
struct vhost_dev *vdev = (struct vhost_dev *)dev->priv;
}
} while (unlikely(phys_addr == 0));
- rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
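+	/* Dequeue through a void pointer; casting &mbuf to (void **)
+	 * would break strict-aliasing rules. */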
+ rte_ring_sc_dequeue(vpool->ring, &obj);
+ mbuf = obj;
if (unlikely(mbuf == NULL)) {
LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: "
"size required: %d\n",
dev->device_fh, desc->len, desc_idx, vpool->buf_size);
put_desc_to_used_list_zcp(vq, desc_idx);
- rte_ring_sp_enqueue(vpool->ring, (void *)mbuf);
+ rte_ring_sp_enqueue(vpool->ring, obj);
return;
}
{
struct mbuf_table *tx_q;
struct rte_mbuf **m_table;
- struct rte_mbuf *mbuf = NULL;
+ void *obj = NULL;
+ struct rte_mbuf *mbuf;
unsigned len, ret, offset = 0;
struct vpool *vpool;
uint16_t vlan_tag = (uint16_t)vlan_tags[(uint16_t)dev->device_fh];
/* Allocate an mbuf and populate the structure. */
vpool = &vpool_array[MAX_QUEUES + vmdq_rx_q];
- rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
+ rte_ring_sc_dequeue(vpool->ring, &obj);
+ mbuf = obj;
if (unlikely(mbuf == NULL)) {
struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
RTE_LOG(ERR, VHOST_DATA,
mbuf->buf_physaddr = m->buf_physaddr;
mbuf->buf_addr = m->buf_addr;
}
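+	/* Request VLAN tag insertion on TX; the l2/l3 lengths below
+	 * assume an untagged IPv4 frame. */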
- mbuf->ol_flags = PKT_TX_VLAN_PKT;
+ mbuf->ol_flags |= PKT_TX_VLAN_PKT;
mbuf->vlan_tci = vlan_tag;
mbuf->l2_len = sizeof(struct ether_hdr);
mbuf->l3_len = sizeof(struct ipv4_hdr);
}
ll_new[i].next = NULL;
- return (ll_new);
+ return ll_new;
}
/*
uint8_t portid;
uint16_t queue_id;
static pthread_t tid;
+ char thread_name[RTE_MAX_THREAD_NAME_LEN];
signal(SIGINT, sigint_handler);
memset(&dev_statistics, 0, sizeof(dev_statistics));
/* Enable stats if the user option is set. */
- if (enable_stats)
- pthread_create(&tid, NULL, (void*)print_stats, NULL );
+ if (enable_stats) {
+ ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE,
+ "Cannot create print-stats thread\n");
+
+ /* Set thread_name for aid in debugging. */
+ snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-stats");
+ ret = rte_thread_setname(tid, thread_name);
+ if (ret != 0)
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "Cannot set print-stats name\n");
+ }
/* Launch all data cores. */
if (zero_copy == 0) {