ctrl->status = status;
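+ /* Control commands (RX mode, VLAN, MQ, ...) can only be sent when the
+ * device exposes a control queue (VIRTIO_NET_F_CTRL_VQ negotiated). */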
if (!vq->hw->cvq) {
- PMD_INIT_LOG(ERR, "%s(): Control queue is "
- "not supported by this device.\n", __func__);
+ PMD_INIT_LOG(ERR,
+ "%s(): Control queue is not supported.\n",
+ __func__);
return -1;
}
nb_desc = vq_size;
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "%s: virtqueue does not exist\n", __func__);
- return (-EINVAL);
+ return -EINVAL;
} else if (!rte_is_power_of_2(vq_size)) {
PMD_INIT_LOG(ERR, "%s: virtqueue size is not powerof 2\n", __func__);
- return (-EINVAL);
+ return -EINVAL;
} else if (nb_desc != vq_size) {
PMD_INIT_LOG(ERR, "Warning: nb_desc(%d) is not equal to vq size (%d), fall to vq size\n",
nb_desc, vq_size);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- } else if(queue_type == VTNET_TQ) {
+ } else if (queue_type == VTNET_TQ) {
rte_snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d",
dev->data->port_id, queue_idx);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
vq_size * sizeof(struct vq_desc_extra), CACHE_LINE_SIZE);
memcpy(vq->vq_name, vq_name, sizeof(vq->vq_name));
- } else if(queue_type == VTNET_CQ) {
+ } else if (queue_type == VTNET_CQ) {
rte_snprintf(vq_name, sizeof(vq_name), "port%d_cvq",
dev->data->port_id);
vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) +
socket_id, 0, VIRTIO_PCI_VRING_ALIGN);
if (mz == NULL) {
rte_free(vq);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32 bit,
* and only accepts 32 bit page frame number.
* Check if the allocated physical memory exceeds 16TB.
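+ * (VIRTIO_PCI_QUEUE_ADDR_SHIFT is 12 for 4 KiB pages, so a 32 bit PFN
+ * covers 2^(12+32) bytes = 16TB.)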
*/
- if ( (mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32) ) {
+ if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!\n");
rte_free(vq);
- return (-ENOMEM);
+ return -ENOMEM;
}
- memset(mz->addr, 0, sizeof(mz->len));
+ memset(mz->addr, 0, mz->len);
- /* meaningfull only to multiple queue */
+ /* meaningful only for multiple queues */
.tx_queue_release = virtio_dev_tx_queue_release,
/* collect stats per queue */
- .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set
+ .queue_stats_mapping_set = virtio_dev_queue_stats_mapping_set,
};
static inline int
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
- return (-1);
+ return -1;
- return (0);
+ return 0;
}
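+ /*
+ * rte_atomic64_cmpset() only stores *src if *dst still holds the value
+ * sampled just before the call; a concurrent update makes it fail, which
+ * both link-status helpers report as -1 so the caller can retry.
+ */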
/**
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
*(uint64_t *)src) == 0)
- return (-1);
+ return -1;
- return (0);
+ return 0;
}
static void
{
struct virtio_hw *hw =
VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- if(stats)
+ if (stats)
memcpy(stats, &hw->eth_stats, sizeof(*stats));
}
virtio_negotiate_features(struct virtio_hw *hw)
{
uint32_t guest_features, mask;
+
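+ /* 'mask' accumulates the features this driver will not negotiate; they
+ * are cleared from the guest feature set before negotiation. */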
mask = VIRTIO_NET_F_CTRL_RX | VIRTIO_NET_F_CTRL_VLAN;
- mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM ;
+ mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;
/* TSO and LRO are only available when their corresponding
* checksum offload feature is also negotiated.
char buf[BUFSIZ];
char *end = NULL;
- if ((f = fopen(filename, "r")) == NULL) {
+ f = fopen(filename, "r");
+ if (f == NULL) {
PMD_INIT_LOG(ERR, "%s(): cannot open sysfs value %s\n",
__func__, filename);
return -1;
/* depending on kernel version, uio can be located in uio/uioX
* or uio:uioX */
rte_snprintf(dirname, sizeof(dirname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
- loc->domain, loc->bus, loc->devid, loc->function);
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/uio",
+ loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
/* retry with the parent directory */
rte_snprintf(dirname, sizeof(dirname),
- SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
- loc->domain, loc->bus, loc->devid, loc->function);
+ SYSFS_PCI_DEVICES "/" PCI_PRI_FMT,
+ loc->domain, loc->bus, loc->devid, loc->function);
dir = opendir(dirname);
if (dir == NULL) {
struct virtio_hw *hw =
VIRTIO_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr) ) {
+ if (RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)) {
PMD_INIT_LOG(ERR,
"MBUF HEADROOM should be enough to hold virtio net hdr\n");
- return (-1);
+ return -1;
}
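+ /* Legacy virtio devices are programmed through x86 port I/O, hence the
+ * raised-IOPL requirement checked below. */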
- if (! (rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
+ if (!(rte_eal_get_configuration()->flags & EAL_FLG_HIGH_IOPL)) {
PMD_INIT_LOG(ERR,
"IOPL call failed in EAL init - cannot use virtio PMD driver\n");
- return (-1);
+ return -1;
}
eth_dev->dev_ops = &virtio_eth_dev_ops;
eth_dev->rx_pkt_burst = &virtio_recv_pkts;
eth_dev->tx_pkt_burst = &virtio_xmit_pkts;
- if(rte_eal_process_type() == RTE_PROC_SECONDARY)
+ if (rte_eal_process_type() == RTE_PROC_SECONDARY)
return 0;
pci_dev = eth_dev->pci_dev;
{
char dirname[PATH_MAX];
char filename[PATH_MAX];
- unsigned long start,size;
+ unsigned long start, size;
if (get_uio_dev(&pci_dev->addr, dirname, sizeof(dirname)) < 0)
return -1;
}
pci_dev->mem_resource[0].addr = (void *)(uintptr_t)start;
pci_dev->mem_resource[0].len = (uint64_t)size;
- PMD_INIT_LOG(DEBUG, "PCI Port IO found start=0x%lx with "
- "size=0x%lx\n", start, size);
+ PMD_INIT_LOG(DEBUG,
+ "PCI Port IO found start=0x%lx with size=0x%lx\n",
+ start, size);
}
#endif
hw->io_base = (uint32_t)(uintptr_t)pci_dev->mem_resource[0].addr;
virtio_negotiate_features(hw);
/* Setting up rx_header size for the device */
- if(vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
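+ /* (virtio_net_hdr_mrg_rxbuf carries an extra num_buffers field, hence
+ * the larger header when VIRTIO_NET_F_MRG_RXBUF is negotiated.) */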
PMD_INIT_LOG(ERR,
"Failed to allocate %d bytes needed to store MAC addresses",
ETHER_ADDR_LEN);
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Copy the permanent MAC address to: virtio_hw */
virtio_get_hwaddr(hw);
ether_addr_copy((struct ether_addr *) hw->mac_addr,
ð_dev->data->mac_addrs[0]);
- PMD_INIT_LOG(DEBUG, "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n", hw->mac_addr[0],
- hw->mac_addr[1],hw->mac_addr[2], hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
+ PMD_INIT_LOG(DEBUG,
+ "PORT MAC: %02X:%02X:%02X:%02X:%02X:%02X\n",
+ hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
+ hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
if (vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VQ)) {
config = &local_config;
if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
offset_conf += sizeof(config->status);
} else {
- PMD_INIT_LOG(DEBUG, "VIRTIO_NET_F_STATUS is "
- "not supported\n");
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_STATUS is not supported\n");
config->status = 0;
}
if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
offset_conf += sizeof(config->max_virtqueue_pairs);
} else {
- PMD_INIT_LOG(DEBUG, "VIRTIO_NET_F_MQ is "
- "not supported\n");
+ PMD_INIT_LOG(DEBUG,
+ "VIRTIO_NET_F_MQ is not supported\n");
config->max_virtqueue_pairs = 1;
}
rte_virtio_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
{
rte_eth_driver_register(&rte_virtio_pmd);
- return (0);
+ return 0;
}
/*
static int
virtio_dev_configure(__rte_unused struct rte_eth_dev *dev)
{
- return (0);
+ return 0;
}
virtio_dev_rxtx_start(dev);
- /* Check VIRTIO_NET_F_STATUS for link status*/
+ /* Check VIRTIO_NET_F_STATUS for link status */
- if(vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
- if((status & VIRTIO_NET_S_LINK_UP) == 0) {
+ if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
PMD_INIT_LOG(ERR, "Port: %d Link is DOWN\n", dev->data->port_id);
- return (-EIO);
+ return -EIO;
} else {
PMD_INIT_LOG(DEBUG, "Port: %d Link is UP\n", dev->data->port_id);
}
{
struct rte_mbuf *buf;
int i, mbuf_num = 0;
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- PMD_INIT_LOG(DEBUG, "Before freeing rxq[%d] used and "
- "unused buf\n", i);
+ PMD_INIT_LOG(DEBUG,
+ "Before freeing rxq[%d] used and unused buf\n", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused(
}
PMD_INIT_LOG(DEBUG, "free %d mbufs\n", mbuf_num);
- PMD_INIT_LOG(DEBUG, "After freeing rxq[%d] used and "
- "unused buf\n", i);
+ PMD_INIT_LOG(DEBUG,
+ "After freeing rxq[%d] used and unused buf\n", i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- PMD_INIT_LOG(DEBUG, "Before freeing txq[%d] used and "
- "unused bufs\n", i);
+ PMD_INIT_LOG(DEBUG,
+ "Before freeing txq[%d] used and unused bufs\n",
+ i);
VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]);
mbuf_num = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_read_link_status(dev, &link);
old = link;
- link.link_duplex = FULL_DUPLEX ;
- link.link_speed = SPEED_10G ;
- if(vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ link.link_duplex = FULL_DUPLEX;
+ link.link_speed = SPEED_10G;
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw\n");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
- if((status & VIRTIO_NET_S_LINK_UP) == 0) {
+ if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
link.link_status = 0;
- PMD_INIT_LOG(DEBUG, "Port %d is down\n",dev->data->port_id);
+ PMD_INIT_LOG(DEBUG, "Port %d is down\n",
+ dev->data->port_id);
} else {
link.link_status = 1;
- PMD_INIT_LOG(DEBUG, "Port %d is up\n",dev->data->port_id);
+ PMD_INIT_LOG(DEBUG, "Port %d is up\n",
+ dev->data->port_id);
}
} else {
- link.link_status = 1; //Link up
+ link.link_status = 1; /* Link up */
}
virtio_dev_atomic_write_link_status(dev, &link);
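+ /* link_update convention: return 0 if the link status changed,
+ * -1 if it did not. */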
- if(old.link_status == link.link_status)
- return (-1);
+ if (old.link_status == link.link_status)
+ return -1;
- /*changed*/
+ /* changed */
- return (0);
+ return 0;
}
static void
virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct virtio_hw *hw = VIRTIO_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
dev_info->driver_name = dev->driver->pci_drv.name;
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
#include "virtio_ethdev.h"
#include "virtqueue.h"
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(m, len)
#else
#define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
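+ /* __rte_mbuf_raw_alloc() skips the usual mbuf field reset; the sanity
+ * check above compiles away unless mbuf debugging is enabled. */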
- return (m);
+ return m;
}
static void
*
*/
int i;
+
PMD_INIT_FUNC_TRACE();
/* Start rx vring. */
vq->mpool = mp;
dev->data->rx_queues[queue_idx] = vq;
- return (0);
+ return 0;
}
/*
}
dev->data->tx_queues[queue_idx] = vq;
- return (0);
+ return 0;
}
static void
if (likely(num > DESC_PER_CACHELINE))
num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
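+ /* The trim above makes the burst end on a cache-line boundary of the
+ * used ring, so host and guest do not share a partially consumed cache
+ * line; it can leave num == 0, handled below. */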
- if(num == 0) return 0;
+ if (num == 0)
+ return 0;
num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num);
PMD_RX_LOG(DEBUG, "used:%d dequeue:%d\n", nb_used, num);
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
rxm = rcv_pkts[i];
PMD_RX_LOG(DEBUG, "packet len:%d\n", len[i]);
if (unlikely(len[i]
- < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
+ < (uint32_t)hw->vtnet_hdr_size + ETHER_HDR_LEN)) {
PMD_RX_LOG(ERR, "Packet drop\n");
nb_enqueued++;
virtio_discard_rxbuf(rxvq, rxm);
rxm->pkt.nb_segs = 1;
rxm->pkt.next = NULL;
rxm->pkt.pkt_len = (uint32_t)(len[i]
- - sizeof(struct virtio_net_hdr));
+ - sizeof(struct virtio_net_hdr));
rxm->pkt.data_len = (uint16_t)(len[i]
- - sizeof(struct virtio_net_hdr));
+ - sizeof(struct virtio_net_hdr));
VIRTIO_DUMP_PACKET(rxm, rxm->pkt.data_len);
rte_pktmbuf_free_seg(new_mbuf);
break;
}
- nb_enqueued ++;
+ nb_enqueued++;
}
if (likely(nb_enqueued)) {
if (unlikely(virtqueue_kick_prepare(rxvq))) {
vq_update_avail_idx(rxvq);
- return (nb_rx);
+ return nb_rx;
}
uint16_t
nb_tx = 0;
if (unlikely(nb_pkts < 1))
- return (nb_pkts);
+ return nb_pkts;
PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
nb_used = VIRTQUEUE_NUSED(txvq);
num--;
}
- if(!virtqueue_full(txvq)) {
+ if (!virtqueue_full(txvq)) {
txm = tx_pkts[nb_tx];
/* Enqueue Packet buffers */
error = virtqueue_enqueue_xmit(txvq, txm);
hw->eth_stats.opackets += nb_tx;
hw->eth_stats.q_opackets[txvq->queue_id] += nb_tx;
- if(unlikely(virtqueue_kick_prepare(txvq))) {
+ if (unlikely(virtqueue_kick_prepare(txvq))) {
virtqueue_notify(txvq);
PMD_TX_LOG(DEBUG, "Notified backend after xmit\n");
}
- return (nb_tx);
+ return nb_tx;
}
/**
* Get all mbufs to be freed.
*/
-struct rte_mbuf * virtqueue_detatch_unused(struct virtqueue *vq);
+struct rte_mbuf *virtqueue_detatch_unused(struct virtqueue *vq);
static inline int
virtqueue_full(const struct virtqueue *vq)
{
- return (vq->vq_free_cnt == 0);
+ return vq->vq_free_cnt == 0;
}
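+ /* used->idx and vq_used_cons_idx are free-running uint16_t counters, so
+ * this subtraction is wraparound-safe and counts pending used entries. */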
#define VIRTQUEUE_NUSED(vq) ((uint16_t)((vq)->vq_ring.used->idx - (vq)->vq_used_cons_idx))
}
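+ /* The host sets VRING_USED_F_NO_NOTIFY while it polls the ring, so a
+ * kick is only needed when the flag is clear. */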
static inline int __attribute__((always_inline))
-virtqueue_kick_prepare(struct virtqueue * vq)
+virtqueue_kick_prepare(struct virtqueue *vq)
{
return !(vq->vq_ring.used->flags & VRING_USED_F_NO_NOTIFY);
}
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
- uint16_t needed;
+ uint16_t needed = 1;
uint16_t head_idx, idx;
- needed = 1;
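+ /* Rx refill posts one write descriptor covering the virtio_net_hdr
+ * (carved from the mbuf headroom) plus the data room, hence needed = 1. */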
if (unlikely(vq->vq_free_cnt == 0))
- return (-ENOSPC);
+ return -ENOSPC;
if (unlikely(vq->vq_free_cnt < needed))
- return (-EMSGSIZE);
+ return -EMSGSIZE;
head_idx = vq->vq_desc_head_idx;
if (unlikely(head_idx >= vq->vq_nentries))
- return (-EFAULT);
+ return -EFAULT;
idx = head_idx;
dxp = &vq->vq_descx[idx];
vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
vq_update_avail_ring(vq, head_idx);
- return (0);
+ return 0;
}
static inline int __attribute__((always_inline))
{
struct vq_desc_extra *dxp;
struct vring_desc *start_dp;
- uint16_t needed;
+ uint16_t needed = 2;
uint16_t head_idx, idx;
- needed = 2;
+
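+ /* Tx uses two descriptors per packet here: one for the virtio_net_hdr
+ * and one for the packet data, hence needed = 2. */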
if (unlikely(txvq->vq_free_cnt == 0))
- return (-ENOSPC);
+ return -ENOSPC;
if (unlikely(txvq->vq_free_cnt < needed))
- return (-EMSGSIZE);
+ return -EMSGSIZE;
head_idx = txvq->vq_desc_head_idx;
if (unlikely(head_idx >= txvq->vq_nentries))
- return (-EFAULT);
+ return -EFAULT;
idx = head_idx;
dxp = &txvq->vq_descx[idx];
txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed);
vq_update_avail_ring(txvq, head_idx);
- return (0);
+ return 0;
}
static inline uint16_t __attribute__((always_inline))
uint16_t i;
/* Caller does the check */
- for (i = 0; i < num ; i ++) {
+ for (i = 0; i < num; i++) {
used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
uep = &vq->vq_ring.used->ring[used_idx];
desc_idx = (uint16_t) uep->id;
vq->vq_descx[desc_idx].cookie = NULL;
}
- return (i);
+ return i;
}
static inline uint16_t __attribute__((always_inline))
return 0;
}
-#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
+#ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
#define VIRTQUEUE_DUMP(vq) do { \
uint16_t used_idx, nused; \
used_idx = (vq)->vq_ring.used->idx; \