PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
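/* Serialize control-queue access: concurrent control commands would otherwise interleave on the shared ring. */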
rte_spinlock_lock(&cvq->lock);
PMD_INIT_LOG(ERR, "Control queue is not supported.");
return -1;
}
rte_spinlock_lock(&cvq->lock);
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, "
"vq->hw->cvq = %p vq = %p",
memcpy(cvq->virtio_net_hdr_mz->addr, ctrl,
sizeof(struct virtio_pmd_ctrl));
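/* Packed and split rings use different descriptor layouts, so command submission is specialized per ring type. */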
result = virtio_send_command_packed(cvq, ctrl, dlen, pkt_num);
else
result = virtio_send_command_split(cvq, ctrl, dlen, pkt_num);
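/* Reset descriptor accounting: every entry free, tail at the last slot, per-descriptor extra state cleared. */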
vq->vq_desc_tail_idx = (uint16_t)(vq->vq_nentries - 1);
vq->vq_free_cnt = vq->vq_nentries;
memset(vq->vq_descx, 0, sizeof(struct vq_desc_extra) * vq->vq_nentries);
vring_desc_init_packed(vq, size);
} else {
struct vring *vr = &vq->vq_split.ring;
- vring_init_split(vr, ring_mem, VIRTIO_PCI_VRING_ALIGN, size);
+ vring_init_split(vr, ring_mem, VIRTIO_VRING_ALIGN, size);
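/* Note: VIRTIO_PCI_VRING_ALIGN becomes VIRTIO_VRING_ALIGN here, matching the vtpci_* to virtio_* renames below; ring alignment is a virtio property, not a PCI one. */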
/*
 * Read the virtqueue size from the Queue Size field.
 * It is always a power of 2; a value of 0 means the
 * virtqueue does not exist.
 */
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
return -EINVAL;
}
PMD_INIT_LOG(DEBUG, "vq_size: %u", vq_size);
if (vq_size == 0) {
PMD_INIT_LOG(ERR, "virtqueue does not exist");
return -EINVAL;
}
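/* Only split rings must have a power-of-2 size; the virtio spec places no such requirement on packed rings. */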
PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
return -EINVAL;
}
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
PMD_INIT_LOG(ERR, "split virtqueue size is not power of 2");
return -EINVAL;
}
snprintf(vq_name, sizeof(vq_name), "port%d_vq%d",
size = RTE_ALIGN_CEIL(sizeof(*vq) +
vq_size * sizeof(struct vq_desc_extra),
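/* For Tx, reserve one virtio_tx_region (net header plus indirect descriptor table) per ring entry. */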
sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region);
} else if (queue_type == VTNET_CQ) {
/* Allocate a page for control vq command, data and status */
}
vq = rte_zmalloc_socket(vq_name, size, RTE_CACHE_LINE_SIZE,
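/* A freshly initialized packed ring starts with the used wrap counter set and the driver's cached avail flag matching it. */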
vq->vq_packed.used_wrap_counter = 1;
vq->vq_packed.cached_flags = VRING_PACKED_DESC_F_AVAIL;
vq->vq_packed.event_flags_shadow = 0;
- size = vring_size(hw, vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
+ size = vring_size(hw, vq_size, VIRTIO_VRING_ALIGN);
+ vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_VRING_ALIGN);
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
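/* The ring memzone must be IOVA-contiguous so the device can DMA the descriptors directly. */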
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d",
size, vq->vq_ring_size);
mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
if (sz_hdr_mz) {
snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_vq%d_hdr",
hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz,
numa_node, RTE_MEMZONE_IOVA_CONTIG,
RTE_CACHE_LINE_SIZE);
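/* The added lines below unwind the header-memzone allocation on failure and allocate a zeroed fake mbuf; judging by the "sw_ring" label, it serves as a placeholder entry for the vectorized Rx ring. */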
+ goto free_hdr_mz;
+ }
+
+ fake_mbuf = rte_zmalloc_socket("sw_ring", sizeof(*fake_mbuf),
+ RTE_CACHE_LINE_SIZE, numa_node);
+ if (!fake_mbuf) {
+ PMD_INIT_LOG(ERR, "can not allocate fake mbuf");
+ ret = -ENOMEM;
+ goto free_sw_ring;
txvq->port_id = dev->data->port_id;
txvq->mz = mz;
txvq->virtio_net_hdr_mz = hdr_mz;
txvq->virtio_net_hdr_mem = hdr_mz->iova;
} else if (queue_type == VTNET_CQ) {
cvq = &vq->cq;
memset(txr, 0, vq_size * sizeof(*txr));
for (i = 0; i < vq_size; i++) {
/* first indirect descriptor is always the tx header */
struct vring_desc *start_dp = txr[i].tx_indir;
vring_desc_init_split(start_dp,
RTE_DIM(txr[i].tx_indir));
queue_type = virtio_get_queue_type(hw, i);
if (queue_type == VTNET_RQ) {
{
struct virtio_hw *hw = dev->data->dev_private;
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
virtio_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
struct virtnet_rx *rxvq = dev->data->rx_queues[queue_id];
offsetof(struct virtio_net_config, mac),
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
}
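/* With VIRTIO_NET_F_MAC the permanent MAC is read from device config space; the else branch (its body is not shown in full here) falls back to a driver-generated address. */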
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MAC)) {
- vtpci_read_dev_config(hw,
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MAC)) {
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, mac),
&hw->mac_addr, RTE_ETHER_ADDR_LEN);
} else {
memcpy(hw->mac_addr, mac_addr, RTE_ETHER_ADDR_LEN);
/* Use atomic update if available */
return virtio_send_command(hw->cvq, &ctrl, &len, 1);
}
offsetof(struct virtio_net_config, mtu),
&config.mtu, sizeof(config.mtu));
- if (vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) {
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
+ if (virtio_with_feature(hw, VIRTIO_F_VERSION_1)) {
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_FEATURES_OK);
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
if (virtio_intr_unmask(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
PMD_DRV_LOG(INFO, "interrupt status = %#x", isr);
if (virtio_intr_unmask(dev) < 0)
PMD_DRV_LOG(ERR, "interrupt enable failed");
if (virtio_dev_link_update(dev, 0) == 0)
rte_eth_dev_callback_process(dev,
RTE_ETH_EVENT_INTR_LSC,
NULL);
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- vtpci_read_dev_config(hw,
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if (status & VIRTIO_NET_S_ANNOUNCE) {
struct virtio_hw *hw = eth_dev->data->dev_private;
eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
PMD_INIT_LOG(INFO,
"virtio: using packed ring %s Tx path on port %u",
hw->use_vec_tx ? "vectorized" : "standard",
if (hw->use_vec_rx) {
PMD_INIT_LOG(INFO,
"virtio: using packed ring vectorized Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst =
&virtio_recv_pkts_packed_vec;
"virtio: using inorder Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
"virtio: using inorder Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
PMD_INIT_LOG(INFO, "queue/interrupt binding");
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
dev->intr_handle->intr_vec[i] = i + 1;
PMD_INIT_LOG(INFO, "queue/interrupt binding");
for (i = 0; i < dev->data->nb_rx_queues; ++i) {
dev->intr_handle->intr_vec[i] = i + 1;
PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
for (i = 0; i < dev->data->nb_rx_queues; ++i)
PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
for (i = 0; i < dev->data->nb_rx_queues; ++i)
- vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
- if (virtio_negotiate_features(hw, req_features) < 0)
+ virtio_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER);
+ if (virtio_ethdev_negotiate_features(hw, req_features) < 0)
eth_dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
/* Set up the Rx header size for the device */
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
- vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
- vtpci_with_feature(hw, VIRTIO_F_RING_PACKED))
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF) ||
+ virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ virtio_with_packed_queue(hw))
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
else
hw->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
if (hw->speed == ETH_SPEED_NUM_UNKNOWN) {
offsetof(struct virtio_net_config, speed),
&config->speed, sizeof(config->speed));
offsetof(struct virtio_net_config, duplex),
&config->duplex, sizeof(config->duplex));
hw->speed = config->speed;
hw->duplex = ETH_LINK_FULL_DUPLEX;
PMD_INIT_LOG(DEBUG, "link speed = %d, duplex = %d",
hw->speed, hw->duplex);
offsetof(struct virtio_net_config, mac),
&config->mac, sizeof(config->mac));
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
- vtpci_read_dev_config(hw,
+ if (virtio_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
&config->status, sizeof(config->status));
} else {
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MQ)) {
- vtpci_read_dev_config(hw,
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MQ)) {
+ virtio_read_dev_config(hw,
offsetof(struct virtio_net_config, max_virtqueue_pairs),
&config->max_virtqueue_pairs,
sizeof(config->max_virtqueue_pairs));
- if (vtpci_with_feature(hw, VIRTIO_NET_F_MTU)) {
- vtpci_read_dev_config(hw,
+ if (virtio_with_feature(hw, VIRTIO_NET_F_MTU)) {
+ virtio_read_dev_config(hw,
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+ return virtio_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+ return virtio_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ virtio_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
+ (!virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ !virtio_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6))) {
- if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- && !vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
+ if ((rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER) &&
+ !virtio_with_feature(hw, VIRTIO_NET_F_CTRL_VLAN)) {
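/* The vectorized path is gated on hard requirements: the right CPU flag (AVX512F here, NEON below), the IN_ORDER and VERSION_1 features, and a sufficient SIMD bitwidth. */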
#if defined(RTE_ARCH_X86_64) && defined(CC_AVX512_SUPPORT)
if ((hw->use_vec_rx || hw->use_vec_tx) &&
(!rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) ||
- !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
- !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized path for requirements not met");
#elif defined(RTE_ARCH_ARM)
if ((hw->use_vec_rx || hw->use_vec_tx) &&
(!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) ||
- !vtpci_with_feature(hw, VIRTIO_F_IN_ORDER) ||
- !vtpci_with_feature(hw, VIRTIO_F_VERSION_1) ||
+ !virtio_with_feature(hw, VIRTIO_F_IN_ORDER) ||
+ !virtio_with_feature(hw, VIRTIO_F_VERSION_1) ||
rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)) {
PMD_DRV_LOG(INFO,
"disabled packed ring vectorized path for requirements not met");
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
}
PMD_INIT_LOG(DEBUG, "Notified backend at initialization");
for (i = 0; i < dev->data->nb_rx_queues; i++) {
/* Initialize Link state */
virtio_dev_link_update(dev, 0);
offsetof(struct virtio_net_config, status),
&status, sizeof(status));
if ((status & VIRTIO_NET_S_LINK_UP) == 0) {
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;
if (host_features & (1ULL << VIRTIO_NET_F_GUEST_CSUM)) {