validate_num_devices(uint32_t max_nb_devices)
{
if (num_devices > max_nb_devices) {
- RTE_LOG(ERR, PORT, "invalid number of devices\n");
+ RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
return -1;
}
return 0;
/* Start the device. */
retval = rte_eth_dev_start(port);
if (retval < 0) {
- RTE_LOG(ERR, DATA, "Failed to start the device.\n");
+ RTE_LOG(ERR, VHOST_DATA, "Failed to start the device.\n");
return retval;
}
rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
- RTE_LOG(INFO, PORT, "Max virtio devices supported: %u\n", num_devices);
- RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
+ RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
+ RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
(unsigned)port,
vmdq_ports_eth_addr[port].addr_bytes[0],
static void
us_vhost_usage(const char *prgname)
{
- RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK\n"
+ RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK\n"
" --vm2vm [0|1|2]\n"
" --rx_retry [0|1] --mergeable [0|1] --stats [0-N]\n"
" --dev-basename <name> --dev-index [0-N]\n"
case 'p':
enabled_port_mask = parse_portmask(optarg);
if (enabled_port_mask == 0) {
- RTE_LOG(INFO, CONFIG, "Invalid portmask\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
us_vhost_usage(prgname);
return -1;
}
MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, (VM2VM_LAST - 1));
if (ret == -1) {
- RTE_LOG(INFO, CONFIG,
+ RTE_LOG(INFO, VHOST_CONFIG,
"Invalid argument for "
"vm2vm [0|1|2]\n");
us_vhost_usage(prgname);
if (!strncmp(long_option[option_index].name, "rx-retry", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, 1);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for rx-retry [0|1]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry [0|1]\n");
us_vhost_usage(prgname);
return -1;
} else {
if (!strncmp(long_option[option_index].name, "rx-retry-delay", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-delay [0-N]\n");
us_vhost_usage(prgname);
return -1;
} else {
if (!strncmp(long_option[option_index].name, "rx-retry-num", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for rx-retry-num [0-N]\n");
us_vhost_usage(prgname);
return -1;
} else {
if (!strncmp(long_option[option_index].name, "mergeable", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, 1);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for mergeable [0|1]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for mergeable [0|1]\n");
us_vhost_usage(prgname);
return -1;
} else {
if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for stats [0..N]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
us_vhost_usage(prgname);
return -1;
} else {
/* Set character device basename. */
if (!strncmp(long_option[option_index].name, "dev-basename", MAX_LONG_OPT_SZ)) {
if (us_vhost_parse_basename(optarg) == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device basename (Max %d characters)\n", MAX_BASENAME_SZ);
us_vhost_usage(prgname);
return -1;
}
if (!strncmp(long_option[option_index].name, "dev-index", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for character device index [0..N]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for character device index [0..N]\n");
us_vhost_usage(prgname);
return -1;
} else
"zero-copy", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, 1);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG,
+ RTE_LOG(INFO, VHOST_CONFIG,
"Invalid argument"
" for zero-copy [0|1]\n");
us_vhost_usage(prgname);
if (zero_copy) {
#ifdef RTE_MBUF_SCATTER_GATHER
- RTE_LOG(ERR, CONFIG, "Before running "
+ RTE_LOG(ERR, VHOST_CONFIG, "Before running "
"zero copy vhost APP, please "
"disable RTE_MBUF_SCATTER_GATHER\n"
"in config file and then rebuild DPDK "
"rx-desc-num", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, MAX_RING_DESC);
if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, CONFIG,
+ RTE_LOG(INFO, VHOST_CONFIG,
"Invalid argument for rx-desc-num[0-N],"
"power of 2 required.\n");
us_vhost_usage(prgname);
"tx-desc-num", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, MAX_RING_DESC);
if ((ret == -1) || (!POWEROF2(ret))) {
- RTE_LOG(INFO, CONFIG,
+ RTE_LOG(INFO, VHOST_CONFIG,
"Invalid argument for tx-desc-num [0-N],"
"power of 2 required.\n");
us_vhost_usage(prgname);
}
if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+ RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
return -1;
}
if ((zero_copy == 1) && (vm2vm_mode == VM2VM_SOFTWARE)) {
- RTE_LOG(INFO, PORT,
+ RTE_LOG(INFO, VHOST_PORT,
"Vhost zero copy doesn't support software vm2vm,"
"please specify 'vm2vm 2' to use hardware vm2vm.\n");
return -1;
unsigned portid;
if (num_ports > nb_ports) {
- RTE_LOG(INFO, PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
+ RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
num_ports, nb_ports);
num_ports = nb_ports;
}
for (portid = 0; portid < num_ports; portid ++) {
if (ports[portid] >= nb_ports) {
- RTE_LOG(INFO, PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
+ RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
ports[portid], (nb_ports - 1));
ports[portid] = INVALID_PORT_ID;
valid_num_ports--;
} \
rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
\
- LOG_DEBUG(DATA, "%s", packet); \
+ LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while(0)
#else
#define PRINT_PACKET(device, addr, size, header) do{} while(0)
break;
}
}
- LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
}
}
- LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| HPA %p\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| HPA %p\n",
dev->device_fh, (void *)(uintptr_t)guest_pa,
(void *)(uintptr_t)vhost_pa);
uint16_t free_entries;
uint8_t success = 0;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue[VIRTIO_RXQ];
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
/* Merge buffer can only handle so many buffers at a time. Tell the guest if this limit is reached. */
if ((mrg_count == MAX_MRG_PKT_BURST) || (res_cur_idx == res_end_idx)) {
virtio_hdr.num_buffers = mrg_count;
- LOG_DEBUG(DATA, "(%"PRIu64") RX: Num merge buffers %d\n", dev->device_fh, virtio_hdr.num_buffers);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") RX: Num merge buffers %d\n", dev->device_fh, virtio_hdr.num_buffers);
rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void*)&virtio_hdr, vq->vhost_hlen);
PRINT_PACKET(dev, (uintptr_t)buff_hdr_addr, vq->vhost_hlen, 1);
mrg_count = 0;
while (dev_ll != NULL) {
if (ether_addr_cmp(&(pkt_hdr->s_addr), &dev_ll->dev->mac_address)) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
return -1;
}
dev_ll = dev_ll->next;
dev->vlan_tag = vlan_tags[dev->device_fh];
/* Print out VMDQ registration info. */
- RTE_LOG(INFO, DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
dev->device_fh,
dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
if (ret)
- RTE_LOG(ERR, DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
+ RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
dev->device_fh);
/* Enable stripping of the vlan tag as we handle routing. */
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
dev_ll->dev->device_fh);
return 0;
}
- LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = virtio_dev_rx(dev_ll->dev, &m, 1);
* destined for the TX device.
*/
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") TX: Source and destination"
" MAC addresses are the same. Dropping "
"packet.\n",
(uint16_t)
vlan_tags[(uint16_t)dev_ll->dev->device_fh];
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") TX: pkt to local VM device id:"
"(%"PRIu64") vlan tag: %d.\n",
dev->device_fh, dev_ll->dev->device_fh,
}
}
- LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
/* Allocate an mbuf and populate the structure. */
mbuf = rte_pktmbuf_alloc(mbuf_pool);
if (unlikely(mbuf == NULL)) {
- RTE_LOG(ERR, DATA, "Failed to allocate memory for mbuf.\n");
+ RTE_LOG(ERR, VHOST_DATA, "Failed to allocate memory for mbuf.\n");
return;
}
if (vq->last_used_idx == avail_idx)
return;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
if (free_entries > MAX_PKT_BURST)
free_entries = MAX_PKT_BURST;
- LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
const uint16_t num_cores = (uint16_t)rte_lcore_count();
uint16_t rx_count = 0;
- RTE_LOG(INFO, DATA, "Procesing on Core %u started \n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started \n", lcore_id);
lcore_ll = lcore_info[lcore_id].lcore_ll;
prev_tsc = 0;
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
free_entries = (avail_idx - *res_base_idx);
- LOG_DEBUG(DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") in get_available_ring_index_zcp: "
"avail idx: %d, "
"res base idx:%d, free entries:%d\n",
dev->device_fh, avail_idx, *res_base_idx,
count = free_entries;
if (unlikely(count == 0)) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") Fail in get_available_ring_index_zcp: "
"avail idx: %d, res base idx:%d, free entries:%d\n",
dev->device_fh, avail_idx,
}
if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, DATA, "(%"PRIu64") Invalid frame buffer"
+ RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Invalid frame buffer"
" address found when attaching RX frame buffer"
" address!\n", dev->device_fh);
put_desc_to_used_list_zcp(vq, desc_idx);
* sub-region or not.
*/
if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, DATA,
+ RTE_LOG(ERR, VHOST_DATA,
"(%"PRIu64") Frame buffer address cross "
"sub-regioin found when attaching RX frame "
"buffer address!\n",
rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
if (unlikely(mbuf == NULL)) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: "
"ring_sc_dequeue fail.\n",
dev->device_fh);
}
if (unlikely(vpool->buf_size > desc->len)) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: frame buffer "
"length(%d) of descriptor idx: %d less than room "
"size required: %d\n",
mbuf->pkt.data_len = desc->len;
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in attach_rxmbuf_zcp: res base idx:%d, "
"descriptor idx:%d\n",
dev->device_fh, res_base_idx, desc_idx);
uint32_t index = 0;
uint32_t mbuf_count = rte_mempool_count(vpool->pool);
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool before "
"clean is: %d\n",
dev->device_fh, mbuf_count);
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring before "
"clean is : %d\n",
dev->device_fh, rte_ring_count(vpool->ring));
used_idx = (used_idx + 1) & (vq->size - 1);
}
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in mempool after "
"clean is: %d\n",
dev->device_fh, rte_mempool_count(vpool->pool));
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: mbuf count in ring after "
"clean is : %d\n",
dev->device_fh, rte_ring_count(vpool->ring));
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: before updated "
"vq->last_used_idx:%d\n",
dev->device_fh, vq->last_used_idx);
vq->last_used_idx += mbuf_count;
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in txmbuf_clean_zcp: after updated "
"vq->last_used_idx:%d\n",
dev->device_fh, vq->last_used_idx);
struct rte_mbuf *mbuf = NULL;
uint32_t index, mbuf_count = rte_mempool_count(vpool->pool);
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in mempool before "
"mbuf_destroy_zcp is: %d\n",
mbuf_count);
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in ring before "
"mbuf_destroy_zcp is : %d\n",
rte_ring_count(vpool->ring));
}
}
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in mempool after "
"mbuf_destroy_zcp is: %d\n",
rte_mempool_count(vpool->pool));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in mbuf_destroy_zcp: mbuf count in ring after "
"mbuf_destroy_zcp is : %d\n",
rte_ring_count(vpool->ring));
uint32_t head_idx, packet_success = 0;
uint16_t res_cur_idx;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
if (count == 0)
return 0;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
res_cur_idx = vq->last_used_idx;
- LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n",
dev->device_fh, res_cur_idx, res_cur_idx + count);
/* Retrieve all of the head indexes first to avoid caching issues. */
desc = &vq->desc[head[packet_success]];
buff = pkts[packet_success];
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: update the used idx for "
"pkt[%d] descriptor idx: %d\n",
dev->device_fh, packet_success,
rte_compiler_barrier();
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: before update used idx: "
"vq.last_used_idx: %d, vq->used->idx: %d\n",
dev->device_fh, vq->last_used_idx, vq->used->idx);
*(volatile uint16_t *)&vq->used->idx += count;
vq->last_used_idx += count;
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in dev_rx_zcp: after update used idx: "
"vq.last_used_idx: %d, vq->used->idx: %d\n",
dev->device_fh, vq->last_used_idx, vq->used->idx);
rte_ring_sc_dequeue(vpool->ring, (void **)&mbuf);
if (unlikely(mbuf == NULL)) {
struct vhost_virtqueue *vq = dev->virtqueue[VIRTIO_TXQ];
- RTE_LOG(ERR, DATA,
+ RTE_LOG(ERR, VHOST_DATA,
"(%"PRIu64") Failed to allocate memory for mbuf.\n",
dev->device_fh);
put_desc_to_used_list_zcp(vq, desc_idx);
*/
if (unlikely(dev_ll->dev->device_fh
== dev->device_fh)) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") TX: Source and destination"
"MAC addresses are the same. Dropping "
"packet.\n",
(uint16_t)
vlan_tags[(uint16_t)dev_ll->dev->device_fh];
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") TX: pkt to local VM device id:"
"(%"PRIu64") vlan tag: %d.\n",
dev->device_fh, dev_ll->dev->device_fh,
tx_q->m_table[len] = mbuf;
len++;
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"(%"PRIu64") in tx_route_zcp: pkt: nb_seg: %d, next:%s\n",
dev->device_fh,
mbuf->pkt.nb_segs,
if (vq->last_used_idx_res == avail_idx)
return;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx_res & (vq->size - 1)]);
free_entries
= (free_entries > MAX_PKT_BURST) ? MAX_PKT_BURST : free_entries;
- LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n",
dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
rte_prefetch0(&vq->desc[head[packet_success + 1]]);
if (unlikely(addr_type == PHYS_ADDR_INVALID)) {
- RTE_LOG(ERR, DATA,
+ RTE_LOG(ERR, VHOST_DATA,
"(%"PRIu64") Invalid frame buffer address found"
"when TX packets!\n",
dev->device_fh);
* sub-region or not.
*/
if (unlikely(addr_type == PHYS_ADDR_CROSS_SUBREG)) {
- RTE_LOG(ERR, DATA,
+ RTE_LOG(ERR, VHOST_DATA,
"(%"PRIu64") Frame buffer address cross "
"sub-regioin found when attaching TX frame "
"buffer address!\n",
const uint16_t lcore_id = rte_lcore_id();
uint16_t count_in_ring, rx_count = 0;
- RTE_LOG(INFO, DATA, "Procesing on Core %u started\n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started\n", lcore_id);
lcore_ll = lcore_info[lcore_id].lcore_ll;
prev_tsc = 0;
if (likely(!dev->remove)) {
tx_q = &tx_queue_zcp[(uint16_t)dev->vmdq_rx_q];
if (tx_q->len) {
- LOG_DEBUG(DATA,
+ LOG_DEBUG(VHOST_DATA,
"TX queue drained after timeout"
" with burst size %u\n",
tx_q->len);
if (likely(ll_dev_last != NULL))
ll_dev_last->next = ll_dev->next;
else
- RTE_LOG(ERR, CONFIG, "Remove entry form ll failed.\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from ll failed.\n");
}
/*
/* Malloc and then chain the linked list. */
ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
if (ll_new == NULL) {
- RTE_LOG(ERR, CONFIG, "Failed to allocate memory for ll_new.\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
return NULL;
}
RTE_LCORE_FOREACH_SLAVE(lcore) {
lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, CONFIG, "Failed to allocate memory for lcore_ll.\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
return -1;
}
}
if (ll_lcore_dev_cur == NULL) {
- RTE_LOG(ERR, CONFIG,
+ RTE_LOG(ERR, VHOST_CONFIG,
"(%"PRIu64") Failed to find the dev to be destroy.\n",
dev->device_fh);
return;
/* Decrement number of device on the lcore. */
lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
- RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
if (zero_copy) {
struct vpool *vpool = &vpool_array[dev->vmdq_rx_q];
/* Stop the RX queue. */
if (rte_eth_dev_rx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") In destroy_device: Failed to stop "
"rx queue:%d\n",
dev->device_fh,
dev->vmdq_rx_q);
}
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") in destroy_device: Start put mbuf in "
"mempool back to ring for RX queue: %d\n",
dev->device_fh, dev->vmdq_rx_q);
/* Stop the TX queue. */
if (rte_eth_dev_tx_queue_stop(ports[0], dev->vmdq_rx_q) != 0) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") In destroy_device: Failed to "
"stop tx queue:%d\n",
dev->device_fh, dev->vmdq_rx_q);
vpool = &vpool_array[dev->vmdq_rx_q + MAX_QUEUES];
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") destroy_device: Start put mbuf in mempool "
"back to ring for TX queue: %d, dev:(%"PRIu64")\n",
dev->device_fh, (dev->vmdq_rx_q + MAX_QUEUES),
/* Add device to main ll */
ll_dev = get_data_ll_free_entry(&ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
"of %d devices per core has been reached\n",
dev->device_fh, num_devices);
return -1;
count_in_ring = rte_ring_count(vpool_array[index].ring);
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") in new_device: mbuf count in mempool "
"before attach is: %d\n",
dev->device_fh,
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") in new_device: mbuf count in ring "
"before attach is : %d\n",
dev->device_fh, count_in_ring);
for (i = 0; i < count_in_ring; i++)
attach_rxmbuf_zcp(dev);
- LOG_DEBUG(CONFIG, "(%"PRIu64") in new_device: mbuf count in "
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
"mempool after attach is: %d\n",
dev->device_fh,
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(CONFIG, "(%"PRIu64") in new_device: mbuf count in "
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") in new_device: mbuf count in "
"ring after attach is : %d\n",
dev->device_fh,
rte_ring_count(vpool_array[index].ring));
if (rte_eth_dev_tx_queue_start(ports[0], dev->vmdq_rx_q) != 0) {
struct vpool *vpool = &vpool_array[dev->vmdq_rx_q];
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to start "
"tx queue:%d\n",
dev->device_fh, dev->vmdq_rx_q);
if (rte_eth_dev_rx_queue_start(ports[0], dev->vmdq_rx_q) != 0) {
struct vpool *vpool = &vpool_array[dev->vmdq_rx_q];
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to start "
"rx queue:%d\n",
dev->device_fh, dev->vmdq_rx_q);
/* Stop the TX queue. */
if (rte_eth_dev_tx_queue_stop(ports[0],
dev->vmdq_rx_q) != 0) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"(%"PRIu64") In new_device: Failed to "
"stop tx queue:%d\n",
dev->device_fh, dev->vmdq_rx_q);
ll_dev->dev->coreid = core_add;
ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
dev->ready = DEVICE_SAFE_REMOVE;
destroy_device(dev);
return -1;
lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
dev->flags |= VIRTIO_DEV_RUNNING;
- RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
return 0;
}
rte_align32pow2(nb_mbuf + 1),
socket, RING_F_SP_ENQ | RING_F_SC_DEQ);
if (likely(vpool_array[index].ring != NULL)) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in setup_mempool_tbl: mbuf count in "
"mempool is: %d\n",
rte_mempool_count(vpool_array[index].pool));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in setup_mempool_tbl: mbuf count in "
"ring is: %d\n",
rte_ring_count(vpool_array[index].ring));
valid_num_ports = check_ports_num(nb_ports);
if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+ RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
return -1;
}
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"Enable loop back for L2 switch in vmdq.\n");
}
} else {
if (vm2vm_mode == VM2VM_HARDWARE) {
/* Enable VT loop back to let L2 switch to do it. */
vmdq_conf_default.rx_adv_conf.vmdq_rx_conf.enable_loop_back = 1;
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"Enable loop back for L2 switch in vmdq.\n");
}
}
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
- RTE_LOG(INFO, PORT,
+ RTE_LOG(INFO, VHOST_PORT,
"Skipping disabled port %d\n", portid);
continue;
}
(void *)mbuf);
}
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in MAIN: mbuf count in mempool at initial "
"is: %d\n", count_in_mempool);
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in MAIN: mbuf count in ring at initial is :"
" %d\n",
rte_ring_count(vpool_array[index].ring));
#endif
/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_DATA RTE_LOGTYPE_USER2
-#define RTE_LOGTYPE_PORT RTE_LOGTYPE_USER3
+#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
+#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
/*
* Device linked list structure for data path.
fi->fh = err;
- RTE_LOG(INFO, CONFIG, "(%"PRIu64") Device configuration started\n", fi->fh);
+ RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device configuration started\n", fi->fh);
fuse_reply_open(req, fi);
}
struct vhost_device_ctx ctx = fuse_req_to_vhost_ctx(req, fi);
ops->destroy_device(ctx);
- RTE_LOG(INFO, CONFIG, "(%"PRIu64") Device released\n", ctx.fh);
+ RTE_LOG(INFO, VHOST_CONFIG, "(%"PRIu64") Device released\n", ctx.fh);
fuse_reply_err(req, err);
}
switch(cmd)
{
case VHOST_NET_SET_BACKEND:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_NET_SET_BACKEND\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_backend);
break;
case VHOST_GET_FEATURES:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_FEATURES\n", ctx.fh);
VHOST_IOCTL_W(uint64_t, features, ops->get_features);
break;
case VHOST_SET_FEATURES:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_FEATURES\n", ctx.fh);
VHOST_IOCTL_R(uint64_t, features, ops->set_features);
break;
case VHOST_RESET_OWNER:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_RESET_OWNER\n", ctx.fh);
VHOST_IOCTL(ops->reset_owner);
break;
case VHOST_SET_OWNER:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_OWNER\n", ctx.fh);
VHOST_IOCTL(ops->set_owner);
break;
case VHOST_SET_MEM_TABLE:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_MEM_TABLE\n", ctx.fh);
static struct vhost_memory mem_temp;
switch(in_bufsz){
break;
case VHOST_SET_VRING_NUM:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_NUM\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_num);
break;
case VHOST_SET_VRING_BASE:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_BASE\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_state, state, ops->set_vring_base);
break;
case VHOST_GET_VRING_BASE:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_GET_VRING_BASE\n", ctx.fh);
VHOST_IOCTL_RW(uint32_t, index, struct vhost_vring_state, state, ops->get_vring_base);
break;
case VHOST_SET_VRING_ADDR:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_ADDR\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_addr, addr, ops->set_vring_addr);
break;
case VHOST_SET_VRING_KICK:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_KICK\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_kick);
break;
case VHOST_SET_VRING_CALL:
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: VHOST_SET_VRING_CALL\n", ctx.fh);
VHOST_IOCTL_R(struct vhost_vring_file, file, ops->set_vring_call);
break;
default:
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") IOCTL: DOESN NOT EXIST\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") IOCTL: DOES NOT EXIST\n", ctx.fh);
result = -1;
fuse_reply_ioctl(req, result, NULL, 0);
}
if (result < 0) {
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: FAIL\n", ctx.fh);
} else {
- LOG_DEBUG(CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") IOCTL: SUCCESS\n", ctx.fh);
}
}
char *fuse_argv[] = {fuse_opt_dummy, fuse_opt_fore, fuse_opt_nomulti};
if (access(cuse_device_name, R_OK | W_OK) < 0) {
- RTE_LOG(ERR, CONFIG, "Character device %s can't be accessed, maybe not exist\n", cuse_device_name);
+ RTE_LOG(ERR, VHOST_CONFIG, "Character device %s can't be accessed, maybe not exist\n", cuse_device_name);
return -1;
}
/* Check if device already exists. */
if (access(char_device_name, F_OK) != -1) {
- RTE_LOG(ERR, CONFIG, "Character device %s already exists\n", char_device_name);
+ RTE_LOG(ERR, VHOST_CONFIG, "Character device %s already exists\n", char_device_name);
return -1;
}
fmap = fopen(mapfile, "r");
if (fmap == NULL) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open maps file for pid %d\n", dev->device_fh, pid);
return -1;
}
fclose(fmap);
if (!found) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file in pid %d maps file\n", dev->device_fh, pid);
return -1;
}
/* Find the guest memory file among the process fds. */
dp = opendir(procdir);
if (dp == NULL) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Cannot open pid %d process directory \n", dev->device_fh, pid);
return -1;
}
rte_snprintf (memfile, PATH_MAX, "/proc/%u/fd/%s", pid, dptr->d_name);
realpath(memfile, resolved_path);
if (resolved_path == NULL) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to resolve fd directory\n", dev->device_fh);
closedir(dp);
return -1;
}
closedir(dp);
if (found == 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find memory file for pid %d\n", dev->device_fh, pid);
return -1;
}
/* Open the shared memory file and map the memory into this process. */
fd = open(memfile, O_RDWR);
if (fd == -1) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to open %s for pid %d\n", dev->device_fh, memfile, pid);
return -1;
}
close (fd);
if (map == MAP_FAILED) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Error mapping the file %s for pid %d\n", dev->device_fh, memfile, pid);
return -1;
}
mem->mapped_address = (uint64_t)(uintptr_t)map;
mem->mapped_size = procmap.len;
- LOG_DEBUG(CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mem File: %s->%s - Size: %llu - VA: %p\n", dev->device_fh,
memfile, resolved_path, (long long unsigned)mem->mapped_size, map);
return 0;
return &ll_dev->dev;
}
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Device not found in linked list.\n", ctx.fh);
return NULL;
}
} else {
cleanup_device(&ll_dev->dev);
free_device(ll_dev);
- RTE_LOG(ERR, CONFIG, "Remove entry from config_ll failed\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Remove entry from config_ll failed\n");
return NULL;
}
}
/*check the number of devices in the system*/
if (num_cur_devices == num_devices) {
- RTE_LOG(ERR, CONFIG, "() Max num devices (%u) exceeded\n", num_devices);
+ RTE_LOG(ERR, VHOST_CONFIG, "() Max num devices (%u) exceeded\n", num_devices);
return -1;
}
/* Setup device and virtqueues. */
new_ll_dev = malloc(sizeof(struct virtio_net_config_ll));
if (new_ll_dev == NULL) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev.\n", ctx.fh);
return -1;
}
virtqueue_rx = malloc(sizeof(struct vhost_virtqueue));
if (virtqueue_rx == NULL) {
free(new_ll_dev);
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_rx.\n", ctx.fh);
return -1;
}
if (virtqueue_tx == NULL) {
free(virtqueue_rx);
free(new_ll_dev);
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for virtqueue_tx.\n", ctx.fh);
return -1;
}
/* Set the vhost_hlen depending on if VIRTIO_NET_F_MRG_RXBUF is set. */
if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- LOG_DEBUG(CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers enabled\n", dev->device_fh);
dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
- LOG_DEBUG(CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Mergeable RX buffers disabled\n", dev->device_fh);
dev->virtqueue[VIRTIO_RXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
dev->virtqueue[VIRTIO_TXQ]->vhost_hlen = sizeof(struct virtio_net_hdr);
}
uint32_t i, nregions = 0, page_size = PAGE_SIZE;
uint64_t cur_phys_addr = 0, next_phys_addr = 0;
if (vva_start % page_size) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in check_countinous: vva start(%p) mod page_size(%d) "
"has remainder\n",
(void *)(uintptr_t)vva_start, page_size);
return 0;
}
if (size % page_size) {
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in check_countinous: "
"size((%"PRIu64")) mod page_size(%d) has remainder\n",
size, page_size);
(void *)(uintptr_t)(vva_start + i + page_size));
if ((cur_phys_addr + page_size) != next_phys_addr) {
++nregions;
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in check_continuous: hva addr:(%p) is not "
"continuous with hva addr:(%p), diff:%d\n",
(void *)(uintptr_t)(vva_start + (uint64_t)i),
(void *)(uintptr_t)(vva_start + (uint64_t)i
+ page_size), page_size);
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in check_continuous: hpa addr:(%p) is not "
"continuous with hpa addr:(%p), "
"diff:(%"PRIu64")\n",
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
rte_mem_virt2phy((void *)(uintptr_t)(vva_start))
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: host phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
+ k + page_size;
mem_region_hpa[regionidx_hpa].memory_size
= k + page_size;
- LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest "
+ LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest "
"phys addr end [%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: guest phys addr "
"size [%d]:(%p)\n",
regionidx_hpa,
mem_region_hpa[regionidx_hpa].host_phys_addr_offset =
next_phys_addr
- mem_region_hpa[regionidx_hpa].guest_phys_address;
- LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest"
+ LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest"
" phys addr start[%d]:(%p)\n",
regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address));
- LOG_DEBUG(CONFIG,
+ LOG_DEBUG(VHOST_CONFIG,
"in fill_hpa_regions: host phys addr "
"start[%d]:(%p)\n",
regionidx_hpa,
= mem_region_hpa[regionidx_hpa].guest_phys_address
+ k + page_size;
mem_region_hpa[regionidx_hpa].memory_size = k + page_size;
- LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest phys addr end "
+ LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr end "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].guest_phys_address_end));
- LOG_DEBUG(CONFIG, "in fill_hpa_regions: guest phys addr size "
+ LOG_DEBUG(VHOST_CONFIG, "in fill_hpa_regions: guest phys addr size "
"[%d]:(%p)\n", regionidx_hpa,
(void *)(uintptr_t)
(mem_region_hpa[regionidx_hpa].memory_size));
/* Malloc the memory structure depending on the number of regions. */
mem = calloc(1, sizeof(struct virtio_memory) + (sizeof(struct virtio_memory_regions) * nregions));
if (mem == NULL) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to allocate memory for dev->mem.\n", dev->device_fh);
return -1;
}
mem->regions[regionidx].memory_size = mem_regions[regionidx].memory_size;
mem->regions[regionidx].userspace_address = mem_regions[regionidx].userspace_addr;
- LOG_DEBUG(CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") REGION: %u - GPA: %p - QEMU VA: %p - SIZE (%"PRIu64")\n", dev->device_fh,
regionidx, (void*)(uintptr_t)mem->regions[regionidx].guest_phys_address,
(void*)(uintptr_t)mem->regions[regionidx].userspace_address,
mem->regions[regionidx].memory_size);
/* Check that we have a valid base address. */
if (mem->base_address == 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find base address of qemu memory file.\n", dev->device_fh);
free(mem);
return -1;
}
/* If a region does not have a valid mapping we rebuild our memory struct to contain only valid entries. */
if (valid_regions != mem->nregions) {
- LOG_DEBUG(CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") Not all memory regions exist in the QEMU mem file. Re-populating mem structure\n",
dev->device_fh);
/* Re-populate the memory structure with only valid regions. Invalid regions are over-written with memmove. */
(sizeof(struct virtio_memory_regions_hpa)
* dev->mem->nregions_hpa));
if (dev->mem->regions_hpa == NULL) {
- RTE_LOG(ERR, CONFIG,
+ RTE_LOG(ERR, VHOST_CONFIG,
"(%"PRIu64") Failed to allocate memory for "
"dev->mem->regions_hpa.\n", dev->device_fh);
return -1;
}
if (fill_hpa_memory_regions(
(void *)dev->mem) != dev->mem->nregions_hpa) {
- RTE_LOG(ERR, CONFIG,
+ RTE_LOG(ERR, VHOST_CONFIG,
"in set_mem_table: hpa memory regions number mismatch: "
"[%d]\n", dev->mem->nregions_hpa);
return -1;
/* The addresses are converted from QEMU virtual to Vhost virtual. */
vq->desc = (struct vring_desc*)(uintptr_t)qva_to_vva(dev, addr->desc_user_addr);
if (vq->desc == 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find descriptor ring address.\n", dev->device_fh);
return -1;
}
vq->avail = (struct vring_avail*)(uintptr_t)qva_to_vva(dev, addr->avail_user_addr);
if (vq->avail == 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find available ring address.\n", dev->device_fh);
return -1;
}
vq->used = (struct vring_used*)(uintptr_t)qva_to_vva(dev, addr->used_user_addr);
if (vq->used == 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") Failed to find used ring address.\n", dev->device_fh);
return -1;
}
- LOG_DEBUG(CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
- LOG_DEBUG(CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
- LOG_DEBUG(CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address desc: %p\n", dev->device_fh, vq->desc);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address avail: %p\n", dev->device_fh, vq->avail);
+ LOG_DEBUG(VHOST_CONFIG, "(%"PRIu64") mapped address used: %p\n", dev->device_fh, vq->used);
return 0;
}
/* Open the character device to the kernel module. */
eventfd_link = open(eventfd_cdev, O_RDWR);
if (eventfd_link < 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") eventfd_link module is not loaded\n", dev->device_fh);
return -1;
}
close(eventfd_link);
if (ret < 0) {
- RTE_LOG(ERR, CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n", dev->device_fh);
+ RTE_LOG(ERR, VHOST_CONFIG, "(%"PRIu64") EVENTFD_COPY ioctl failed\n", dev->device_fh);
return -1;
}
validate_num_devices(uint32_t max_nb_devices)
{
if (num_devices > max_nb_devices) {
- RTE_LOG(ERR, PORT, "invalid number of devices\n");
+ RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
return -1;
}
return 0;
return retval;
rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
- RTE_LOG(INFO, PORT, "Max virtio devices supported: %u\n", num_devices);
- RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
+ RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
+ RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
(unsigned)port,
vmdq_ports_eth_addr[port].addr_bytes[0],
static void
us_vhost_usage(const char *prgname)
{
- RTE_LOG(INFO, CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
+ RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
" -p PORTMASK: Set mask for ports to be used by application\n"
" --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
" --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
case 'p':
enabled_port_mask = parse_portmask(optarg);
if (enabled_port_mask == 0) {
- RTE_LOG(INFO, CONFIG, "Invalid portmask\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
us_vhost_usage(prgname);
return -1;
}
if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, 1);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for vm2vm [0|1]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
us_vhost_usage(prgname);
return -1;
} else {
if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
ret = parse_num_opt(optarg, INT32_MAX);
if (ret == -1) {
- RTE_LOG(INFO, CONFIG, "Invalid argument for stats [0..N]\n");
+ RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
us_vhost_usage(prgname);
return -1;
} else {
}
if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+ RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
return -1;
}
unsigned portid;
if (num_ports > nb_ports) {
- RTE_LOG(INFO, PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
+ RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
num_ports, nb_ports);
num_ports = nb_ports;
}
for (portid = 0; portid < num_ports; portid ++) {
if (ports[portid] >= nb_ports) {
- RTE_LOG(INFO, PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
+ RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
ports[portid], (nb_ports - 1));
ports[portid] = INVALID_PORT_ID;
valid_num_ports--;
} \
rte_snprintf(packet + strnlen(packet, MAX_PRINT_BUFF), MAX_PRINT_BUFF - strnlen(packet, MAX_PRINT_BUFF), "\n"); \
\
- LOG_DEBUG(DATA, "%s", packet); \
+ LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while(0)
#else
#define PRINT_PACKET(device, addr, size, header) do{} while(0)
break;
}
}
- LOG_DEBUG(DATA, "(%"PRIu64") GPA %p| VVA %p\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") GPA %p| VVA %p\n",
dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
return vhost_va;
uint16_t free_entries;
uint8_t success = 0;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_rx()\n", dev->device_fh);
vq = dev->virtqueue_rx;
count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
/* As many data cores may want access to available buffers, they need to be reserved. */
res_end_idx);
} while (unlikely(success == 0));
res_cur_idx = res_base_idx;
- LOG_DEBUG(DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Current Index %d| End Index %d\n", dev->device_fh, res_cur_idx, res_end_idx);
/* Prefetch available ring to retrieve indexes. */
rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
while (dev_ll != NULL) {
if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
return -1;
}
dev_ll = dev_ll->next;
dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
/* Print out VMDQ registration info. */
- RTE_LOG(INFO, DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
dev->device_fh,
dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
/* Register the MAC address. */
ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
if (ret) {
- RTE_LOG(ERR, DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
+ RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
dev->device_fh);
return -1;
}
/* Drop the packet if the TX packet is destined for the TX device. */
if (dev_ll->dev->device_fh == dev->device_fh) {
- LOG_DEBUG(DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: Source and destination MAC addresses are the same. Dropping packet.\n",
dev_ll->dev->device_fh);
return 0;
}
- LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is local\n", dev_ll->dev->device_fh);
if (dev_ll->dev->remove) {
/*drop the packet if the device is marked for removal*/
- LOG_DEBUG(DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Device is marked for removal\n", dev_ll->dev->device_fh);
} else {
/*send the packet to the local virtio device*/
ret = virtio_dev_rx(dev_ll->dev, &m, 1);
return;
}
- LOG_DEBUG(DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") TX: MAC address is external\n", dev->device_fh);
/*Add packet to the port tx queue*/
tx_q = &lcore_tx_queue[lcore_id];
if (vq->last_used_idx == avail_idx)
return;
- LOG_DEBUG(DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") virtio_dev_tx()\n", dev->device_fh);
/* Prefetch available ring to retrieve head indexes. */
rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
free_entries = avail_idx - vq->last_used_idx;
free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
- LOG_DEBUG(DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
+ LOG_DEBUG(VHOST_DATA, "(%"PRIu64") Buffers available %d\n", dev->device_fh, free_entries);
/* Retrieve all of the head indexes first to avoid caching issues. */
for (i = 0; i < free_entries; i++)
head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
const uint16_t num_cores = (uint16_t)rte_lcore_count();
uint16_t rx_count = 0;
- RTE_LOG(INFO, DATA, "Procesing on Core %u started \n", lcore_id);
+ RTE_LOG(INFO, VHOST_DATA, "Processing on Core %u started \n", lcore_id);
lcore_ll = lcore_info[lcore_id].lcore_ll;
prev_tsc = 0;
if (unlikely(diff_tsc > drain_tsc)) {
if (tx_q->len) {
- LOG_DEBUG(DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
+ LOG_DEBUG(VHOST_DATA, "TX queue drained after timeout with burst size %u \n", tx_q->len);
/*Tx any packets in the queue*/
ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
/* Malloc and then chain the linked list. */
ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
if (ll_new == NULL) {
- RTE_LOG(ERR, CONFIG, "Failed to allocate memory for ll_new.\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
return NULL;
}
RTE_LCORE_FOREACH_SLAVE(lcore) {
lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, CONFIG, "Failed to allocate memory for lcore_ll.\n");
+ RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
return -1;
}
/* Decrement number of device on the lcore. */
lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
- RTE_LOG(INFO, DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
}
/*
/* Add device to main ll */
ll_dev = get_data_ll_free_entry(&ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
"of %d devices per core has been reached\n",
dev->device_fh, num_devices);
return -1;
ll_dev->dev->coreid = core_add;
ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
if (ll_dev == NULL) {
- RTE_LOG(INFO, DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
destroy_device(dev);
return -1;
}
lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
dev->flags |= VIRTIO_DEV_RUNNING;
- RTE_LOG(INFO, DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
+ RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
link_vmdq(dev);
valid_num_ports = check_ports_num(nb_ports);
if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, PORT, "Current enabled port number is %u,"
+ RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
"but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
return -1;
}
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((enabled_port_mask & (1 << portid)) == 0) {
- RTE_LOG(INFO, PORT, "Skipping disabled port %d\n", portid);
+ RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
continue;
}
if (port_init(portid, mbuf_pool) != 0)
#endif
/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_DATA RTE_LOGTYPE_USER2
-#define RTE_LOGTYPE_PORT RTE_LOGTYPE_USER3
+#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
+#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
+#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
/*
* Device linked list structure for data path.