#include "main.h"
-#define MAX_QUEUES 128
+#define MAX_QUEUES 256
/* the maximum number of external ports supported */
#define MAX_SUP_PORTS 1
#define MAC_ADDR_CMP 0xFFFFFFFFFFFFULL
/* Number of descriptors per cacheline. */
-#define DESC_PER_CACHELINE (CACHE_LINE_SIZE / sizeof(struct vring_desc))
+#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
+/* Promiscuous mode */
+static uint32_t promiscuous;
+
/* Number of switching cores enabled */
static uint32_t num_switching_cores = 0;
static unsigned lcore_ids[RTE_MAX_LCORE];
static uint8_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
+static uint16_t num_pf_queues, num_vmdq_queues;
+static uint16_t vmdq_pool_base, vmdq_queue_base;
+static uint16_t queues_per_pool;
static const uint16_t external_pkt_default_vlan_tag = 2000;
const uint16_t vlan_tags[] = {
get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
{
struct rte_eth_vmdq_rx_conf conf;
+ struct rte_eth_vmdq_rx_conf *def_conf =
+ &vmdq_conf_default.rx_adv_conf.vmdq_rx_conf;
unsigned i;
memset(&conf, 0, sizeof(conf));
conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
conf.nb_pool_maps = num_devices;
- conf.enable_loop_back =
- vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back;
+ conf.enable_loop_back = def_conf->enable_loop_back;
+ conf.rx_mode = def_conf->rx_mode;
for (i = 0; i < conf.nb_pool_maps; i++) {
conf.pool_map[i].vlan_id = vlan_tags[ i ];
/* Configure the number of supported virtio devices based on VMDQ limits. */
num_devices = dev_info.max_vmdq_pools;
- num_queues = dev_info.max_rx_queues;
if (zero_copy) {
rx_ring_size = num_rx_descriptor;
retval = get_eth_conf(&port_conf, num_devices);
if (retval < 0)
return retval;
+ /* NIC queues are divided into pf queues and vmdq queues. */
+ num_pf_queues = dev_info.max_rx_queues - dev_info.vmdq_queue_num;
+ queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
+ num_vmdq_queues = num_devices * queues_per_pool;
+ num_queues = num_pf_queues + num_vmdq_queues;
+ vmdq_queue_base = dev_info.vmdq_queue_base;
+ vmdq_pool_base = dev_info.vmdq_pool_base;
+ printf("pf queue num: %u, configured vmdq pool num: %u, each vmdq pool has %u queues\n",
+ num_pf_queues, num_devices, queues_per_pool);
if (port >= rte_eth_dev_count()) return -1;
- rx_rings = (uint16_t)num_queues,
+ rx_rings = (uint16_t)dev_info.max_rx_queues;
/* Configure ethernet device. */
retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
if (retval != 0)
return retval;
}
+ if (promiscuous)
+ rte_eth_promiscuous_enable(port);
+
rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
};
/* Parse command line */
- while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
+ while ((opt = getopt_long(argc, argv, "p:P",
+ long_option, &option_index)) != EOF) {
switch (opt) {
/* Portmask */
case 'p':
}
break;
+ case 'P':
+ promiscuous = 1;
+ vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.rx_mode =
+ ETH_VMDQ_ACCEPT_BROADCAST |
+ ETH_VMDQ_ACCEPT_MULTICAST;
+ rte_vhost_feature_enable(1ULL << VIRTIO_NET_F_CTRL_RX);
+
+ break;
+
case 0:
/* Enable/disable vm2vm comms. */
if (!strncmp(long_option[option_index].name, "vm2vm",
vdev->vlan_tag);
/* Register the MAC address. */
- ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address, (uint32_t)dev->device_fh);
+ ret = rte_eth_dev_mac_addr_add(ports[0], &vdev->mac_address,
+ (uint32_t)dev->device_fh + vmdq_pool_base);
if (ret)
RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
dev->device_fh);
}
if (vm2vm_mode == VM2VM_HARDWARE) {
- if (find_local_dest(dev, m, &offset, &vlan_tag) != 0) {
+ if (find_local_dest(dev, m, &offset, &vlan_tag) != 0 ||
+ offset > rte_pktmbuf_tailroom(m)) {
rte_pktmbuf_free(m);
return;
}
/* Buffer address translation. */
buff_addr = gpa_to_vva(dev, desc->addr);
- phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len, &addr_type);
+ /* Need check extra VLAN_HLEN size for inserting VLAN tag */
+ phys_addr = gpa_to_hpa(vdev, desc->addr, desc->len + VLAN_HLEN,
+ &addr_type);
if (likely(packet_success < (free_entries - 1)))
/* Prefetch descriptor index. */
struct vhost_dev *vdev;
uint32_t regionidx;
- vdev = rte_zmalloc("vhost device", sizeof(*vdev), CACHE_LINE_SIZE);
+ vdev = rte_zmalloc("vhost device", sizeof(*vdev), RTE_CACHE_LINE_SIZE);
if (vdev == NULL) {
RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Couldn't allocate memory for vhost dev\n",
dev->device_fh);
vdev->regions_hpa = (struct virtio_memory_regions_hpa *) rte_zmalloc("vhost hpa region",
sizeof(struct virtio_memory_regions_hpa) * vdev->nregions_hpa,
- CACHE_LINE_SIZE);
+ RTE_CACHE_LINE_SIZE);
if (vdev->regions_hpa == NULL) {
RTE_LOG(ERR, VHOST_CONFIG, "Cannot allocate memory for hpa region\n");
rte_free(vdev);
ll_dev->vdev = vdev;
add_data_ll_entry(&ll_root_used, ll_dev);
vdev->vmdq_rx_q
- = dev->device_fh * (num_queues / num_devices);
+ = dev->device_fh * queues_per_pool + vmdq_queue_base;
if (zero_copy) {
uint32_t index = vdev->vmdq_rx_q;
* device is also registered here to handle the IOCTLs.
*/
int
-MAIN(int argc, char *argv[])
+main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool = NULL;
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret;
- uint8_t portid, queue_id = 0;
+ uint8_t portid;
+ uint16_t queue_id;
static pthread_t tid;
/* init EAL */
}
LOG_DEBUG(VHOST_CONFIG,
- "in MAIN: mbuf count in mempool at initial "
+ "in main: mbuf count in mempool at initial "
"is: %d\n", count_in_mempool);
LOG_DEBUG(VHOST_CONFIG,
- "in MAIN: mbuf count in ring at initial is :"
+ "in main: mbuf count in ring at initial is :"
" %d\n",
rte_ring_count(vpool_array[index].ring));
}