struct virtio_pmd_ctrl *ctrl,
int *dlen, int pkt_num)
{
- struct virtqueue *vq = cvq->vq;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
int head;
struct vring_packed_desc *desc = vq->vq_packed.ring.desc;
struct virtio_pmd_ctrl *result;
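
These hunks drop the cached vq back-pointer from the virtnet control/Rx/Tx structs and recover the owning virtqueue from the embedded struct instead. The conversion helpers are not part of this excerpt; below is a minimal sketch of what virtnet_cq_to_vq (and the rxq/txq siblings used further down) plausibly look like, assuming struct virtqueue embeds these structs as rxq, txq and cq members:

    /* Hedged sketch, not shown in this patch: container-of style
     * conversions, assuming the virtnet structs are embedded in
     * struct virtqueue. Requires rte_common.h (RTE_PTR_SUB) and
     * stddef.h (offsetof). */
    static inline struct virtqueue *
    virtnet_cq_to_vq(struct virtnet_ctl *cvq)
    {
        return RTE_PTR_SUB(cvq, offsetof(struct virtqueue, cq));
    }

    static inline struct virtqueue *
    virtnet_rxq_to_vq(struct virtnet_rx *rxvq)
    {
        return RTE_PTR_SUB(rxvq, offsetof(struct virtqueue, rxq));
    }

    static inline struct virtqueue *
    virtnet_txq_to_vq(struct virtnet_tx *txvq)
    {
        return RTE_PTR_SUB(txvq, offsetof(struct virtqueue, txq));
    }
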
int *dlen, int pkt_num)
{
struct virtio_pmd_ctrl *result;
- struct virtqueue *vq = cvq->vq;
+ struct virtqueue *vq = virtnet_cq_to_vq(cvq);
uint32_t head, i;
int k, sum = 0;
ctrl->status = status;
- if (!cvq || !cvq->vq) {
+ if (!cvq) {
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
rte_spinlock_lock(&cvq->lock);
- vq = cvq->vq;
+ vq = virtnet_cq_to_vq(cvq);
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
int queue_type = virtio_get_queue_type(hw, queue_idx);
int ret;
int numa_node = dev->device->numa_node;
+ struct rte_mbuf *fake_mbuf = NULL;
PMD_INIT_LOG(INFO, "setting up queue: %u on NUMA node %d",
queue_idx, numa_node);
mz = rte_memzone_lookup(vq_name);
if (mz == NULL) {
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_vq;
}
}
hdr_mz = rte_memzone_lookup(vq_hdr_name);
if (hdr_mz == NULL) {
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_mz;
}
}
}
if (!sw_ring) {
PMD_INIT_LOG(ERR, "can not allocate RX soft ring");
ret = -ENOMEM;
- goto fail_q_alloc;
+ goto free_hdr_mz;
+ }
+
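+ /* Placeholder mbuf (assumption: used as a safe sentinel, e.g. to pad the sw_ring for the vectorized Rx path). */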
+ fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!fake_mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ ret = -ENOMEM;
+ goto free_sw_ring;
}
vq->sw_ring = sw_ring;
rxvq = &vq->rxq;
- rxvq->vq = vq;
rxvq->port_id = dev->data->port_id;
rxvq->mz = mz;
+ rxvq->fake_mbuf = fake_mbuf;
} else if (queue_type == VTNET_TQ) {
txvq = &vq->txq;
- txvq->vq = vq;
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
- cvq->vq = vq;
cvq->mz = mz;
cvq->virtio_net_hdr_mz = hdr_mz;
cvq->virtio_net_hdr_mem = hdr_mz->iova;
if (VIRTIO_OPS(hw)->setup_queue(hw, vq) < 0) {
PMD_INIT_LOG(ERR, "setup_queue failed");
- return -EINVAL;
+ ret = -EINVAL;
+ goto clean_vq;
}
return 0;
-fail_q_alloc:
+clean_vq:
+ hw->cvq = NULL;
+ rte_free(fake_mbuf);
+free_sw_ring:
rte_free(sw_ring);
+free_hdr_mz:
rte_memzone_free(hdr_mz);
+free_mz:
rte_memzone_free(mz);
+free_vq:
rte_free(vq);
return ret;
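
The catch-all fail_q_alloc label is replaced by a cascade ordered inversely to the allocations, so every failure path frees exactly what was acquired before it and nothing else. Reduced to a skeleton (hypothetical names, not driver code):

    #include <errno.h>

    /* Hypothetical helpers standing in for the driver's allocations. */
    extern void *alloc_a(void);
    extern void *alloc_b(void);
    extern int setup(void *a, void *b);
    extern void release(void *p);

    /* Each label releases one resource and falls through to the next;
     * jump targets are chosen newest-first. */
    static int init(void)
    {
        void *a, *b;
        int ret;

        a = alloc_a();
        if (a == NULL)
            return -ENOMEM;

        b = alloc_b();
        if (b == NULL) {
            ret = -ENOMEM;
            goto free_a;    /* only 'a' exists yet */
        }

        if (setup(a, b) < 0) {
            ret = -EINVAL;
            goto free_b;    /* unwind newest-first */
        }
        return 0;

    free_b:
        release(b);
    free_a:
        release(a);
        return ret;
    }
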
queue_type = virtio_get_queue_type(hw, i);
if (queue_type == VTNET_RQ) {
+ rte_free(vq->rxq.fake_mbuf);
rte_free(vq->sw_ring);
rte_memzone_free(vq->rxq.mz);
} else if (queue_type == VTNET_TQ) {
return 0;
}
+uint16_t
+virtio_rx_mem_pool_buf_size(struct rte_mempool *mp)
+{
+ return rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
+}
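
As a worked example, a pool created with the default mbuf size yields 2048 usable bytes per Rx buffer (hypothetical pool, standard mbuf constants):

    /* RTE_MBUF_DEFAULT_BUF_SIZE = 2048 + RTE_PKTMBUF_HEADROOM (128), so the
     * data room is 2176 bytes and the usable Rx buffer size is 2048. */
    struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool", 4096, 256, 0,
            RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
    uint16_t buf_size = virtio_rx_mem_pool_buf_size(mp);    /* 2048 */
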
+
+bool
+virtio_rx_check_scatter(uint16_t max_rx_pkt_len, uint16_t rx_buf_size,
+ bool rx_scatter_enabled, const char **error)
+{
+ if (!rx_scatter_enabled && max_rx_pkt_len > rx_buf_size) {
+ *error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+virtio_check_scatter_on_all_rx_queues(struct rte_eth_dev *dev,
+ uint16_t frame_size)
+{
+ struct virtio_hw *hw = dev->data->dev_private;
+ struct virtnet_rx *rxvq;
+ struct virtqueue *vq;
+ unsigned int qidx;
+ uint16_t buf_size;
+ const char *error;
+
+ if (hw->vqs == NULL)
+ return true;
+
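+ /* Rx vqs sit at index 2 * qidx + VTNET_SQ_RQ_QUEUE_IDX; stop at the first one not yet allocated. */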
+ for (qidx = 0; (vq = hw->vqs[2 * qidx + VTNET_SQ_RQ_QUEUE_IDX]) != NULL;
+ qidx++) {
+ rxvq = &vq->rxq;
+ if (rxvq->mpool == NULL)
+ continue;
+ buf_size = virtio_rx_mem_pool_buf_size(rxvq->mpool);
+
+ if (!virtio_rx_check_scatter(frame_size, buf_size,
+ hw->rx_ol_scatter, &error)) {
+ PMD_INIT_LOG(ERR, "MTU check for RxQ %u failed: %s",
+ qidx, error);
+ return false;
+ }
+ }
+
+ return true;
+}
+
#define VLAN_TAG_LEN 4 /* 802.3ac tag (not DMA'd) */
static int
virtio_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
RTE_ETHER_MIN_MTU, max_frame_size - ether_hdr_len);
return -EINVAL;
}
+
+ if (!virtio_check_scatter_on_all_rx_queues(dev, frame_size)) {
+ PMD_INIT_LOG(ERR, "MTU vs Rx scatter and Rx buffers check failed");
+ return -EINVAL;
+ }
+
+ hw->max_rx_pkt_len = frame_size;
+ dev->data->dev_conf.rxmode.max_rx_pkt_len = hw->max_rx_pkt_len;
+
return 0;
}
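
With the new check, an MTU that cannot fit in a single Rx buffer is rejected up front rather than breaking Rx later. Hypothetical application-side usage:

    /* Assume 2048-byte Rx buffers and DEV_RX_OFFLOAD_SCATTER not requested:
     * a 9000-byte MTU cannot fit one frame per buffer, so the PMD now
     * returns an error and logs which RxQ failed the check. */
    if (rte_eth_dev_set_mtu(port_id, 9000) < 0)
        printf("MTU rejected: enable Rx scatter or use larger mbufs\n");
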
{
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_enable_intr(vq);
virtio_mb(hw->weak_barriers);
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
- struct virtqueue *vq = rxvq->vq;
+ struct virtqueue *vq = virtnet_rxq_to_vq(rxvq);
virtqueue_disable_intr(vq);
return 0;
if (rxmode->max_rx_pkt_len > hw->max_mtu + ether_hdr_len)
req_features &= ~(1ULL << VIRTIO_NET_F_MTU);
+ hw->max_rx_pkt_len = rxmode->max_rx_pkt_len;
+
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM))
req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
hw->vlan_strip = 1;
+ hw->rx_ol_scatter = !!(rx_offloads & DEV_RX_OFFLOAD_SCATTER);
+
if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
!virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
PMD_DRV_LOG(ERR,
virtio_dev_start(struct rte_eth_dev *dev)
{
uint16_t nb_queues, i;
- struct virtnet_rx *rxvq;
- struct virtnet_tx *txvq __rte_unused;
+ struct virtqueue *vq;
struct virtio_hw *hw = dev->data->dev_private;
int ret;
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
/* Flush the old packets */
- virtqueue_rxvq_flush(rxvq->vq);
- virtqueue_notify(rxvq->vq);
+ virtqueue_rxvq_flush(vq);
+ virtqueue_notify(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- virtqueue_notify(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ virtqueue_notify(vq);
}
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- rxvq = dev->data->rx_queues[i];
- VIRTQUEUE_DUMP(rxvq->vq);
+ vq = virtnet_rxq_to_vq(dev->data->rx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
for (i = 0; i < dev->data->nb_tx_queues; i++) {
- txvq = dev->data->tx_queues[i];
- VIRTQUEUE_DUMP(txvq->vq);
+ vq = virtnet_txq_to_vq(dev->data->tx_queues[i]);
+ VIRTQUEUE_DUMP(vq);
}
set_rxtx_funcs(dev);
dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE;
dev_info->max_rx_pktlen = VIRTIO_MAX_RX_PKTLEN;
dev_info->max_mac_addrs = VIRTIO_MAX_MAC_ADDRS;
+ dev_info->max_mtu = hw->max_mtu;
host_features = VIRTIO_OPS(hw)->get_features(hw);
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
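+ /* Scatter is only advertised with mergeable Rx buffers (assumption: the non-mergeable Rx paths post a single buffer per packet). */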
+ if (host_features & (1ULL << VIRTIO_NET_F_MRG_RXBUF))
+ dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SCATTER;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {
dev_info->rx_offload_capa |=
DEV_RX_OFFLOAD_TCP_CKSUM |
return 0;
}
-RTE_LOG_REGISTER(virtio_logtype_init, pmd.net.virtio.init, NOTICE);
-RTE_LOG_REGISTER(virtio_logtype_driver, pmd.net.virtio.driver, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
+RTE_LOG_REGISTER_SUFFIX(virtio_logtype_driver, driver, NOTICE);
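
RTE_LOG_REGISTER_SUFFIX builds the logtype name from the component's default logtype plus a suffix, so the registered names should remain pmd.net.virtio.init and pmd.net.virtio.driver, with the prefix now supplied by the build system rather than a hand-written string. A sketch of the macro's effect, assuming the name is formed from RTE_LOG_DEFAULT_LOGTYPE:

    /* Paraphrased expansion (assumption about the exact definition): */
    RTE_LOG_REGISTER_SUFFIX(virtio_logtype_init, init, NOTICE);
    /* ~> registers RTE_STR(RTE_LOG_DEFAULT_LOGTYPE) ".init" at NOTICE,
     *    i.e. "pmd.net.virtio.init" for this driver. */
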