Previously, the virtio split ring vectorized path was selected by
default. This is not suitable for everyone, because that path does not
follow the virtio spec. Add a new devarg for virtio vectorized path
selection; by default, the vectorized path is disabled.
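For example (illustrative command line; EAL device option syntax may
vary between releases), the vectorized path can be requested per device
via the devargs string:

    testpmd -l 0-1 -w 0000:04:00.0,vectorized=1 -- -i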
Signed-off-by: Marvin Liu <yong.liu@intel.com>
Reviewed-by: Maxime Coquelin <maxime.coquelin@redhat.com>
rte_eth_link_get_nowait function.
(Default: 10000 (10G))
+#. ``vectorized``:
+
+    It is used to specify whether the virtio device prefers to use the
+    vectorized path. The dependencies of the vectorized path are then
+    checked during path selection.
+    (Default: 0 (disabled))
+
Below devargs are supported by the virtio-user vdev:
#. ``path``:
static uint32_t virtio_dev_speed_capa_get(uint32_t speed);
static int virtio_dev_devargs_parse(struct rte_devargs *devargs,
int *vdpa,
-	uint32_t *speed);
+	uint32_t *speed,
+	int *vectorized);
static int virtio_dev_info_get(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info);
static int virtio_dev_link_update(struct rte_eth_dev *dev,
eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
}
} else {
- if (hw->use_simple_rx) {
- PMD_INIT_LOG(INFO, "virtio: using simple Rx path on port %u",
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
eth_dev->data->port_id);
eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
} else if (hw->use_inorder_rx) {
{
struct virtio_hw *hw = eth_dev->data->dev_private;
uint32_t speed = SPEED_UNKNOWN;
+	int vectorized = 0;
int ret;
if (sizeof(struct virtio_net_hdr_mrg_rxbuf) > RTE_PKTMBUF_HEADROOM) {
return 0;
}
ret = virtio_dev_devargs_parse(eth_dev->device->devargs,
-		NULL, &speed);
+		NULL, &speed, &vectorized);
if (ret < 0)
return ret;
hw->speed = speed;
if (ret < 0)
goto err_virtio_init;
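+	/* Vectorized Rx is only enabled for the split ring here; for
+	 * packed ring the flag stays clear and the regular datapaths
+	 * are used.
+	 */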
+ if (vectorized) {
+ if (!vtpci_packed_queue(hw))
+ hw->use_vec_rx = 1;
+ }
+
hw->opened = true;
return 0;
+static int vectorized_check_handler(__rte_unused const char *key,
+ const char *value, void *ret_val)
+{
+ if (strcmp(value, "1") == 0)
+ *(int *)ret_val = 1;
+ else
+ *(int *)ret_val = 0;
+
+ return 0;
+}
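As an aside, here is a minimal, self-contained sketch of how such a
handler is driven by the rte_kvargs API (demo_parse_vectorized and the
devargs string literal are illustrative, not part of this patch):

#include <rte_kvargs.h>

/* Illustrative helper: extract "vectorized=<val>" from a devargs
 * string such as "vectorized=1" into *vectorized. */
static int
demo_parse_vectorized(const char *devargs_str, int *vectorized)
{
	const char *valid_keys[] = { "vectorized", NULL };
	struct rte_kvargs *kvlist;
	int ret = 0;

	kvlist = rte_kvargs_parse(devargs_str, valid_keys);
	if (kvlist == NULL)
		return -1;

	/* rte_kvargs_process() invokes vectorized_check_handler once
	 * per matching key/value pair found in the string. */
	if (rte_kvargs_count(kvlist, "vectorized") == 1)
		ret = rte_kvargs_process(kvlist, "vectorized",
				vectorized_check_handler, vectorized);

	rte_kvargs_free(kvlist);
	return ret;
}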
#define VIRTIO_ARG_SPEED "speed"
#define VIRTIO_ARG_VDPA "vdpa"
+#define VIRTIO_ARG_VECTORIZED "vectorized"
static int
virtio_dev_devargs_parse(struct rte_devargs *devargs, int *vdpa,
-	uint32_t *speed)
+	uint32_t *speed, int *vectorized)
{
struct rte_kvargs *kvlist;
int ret = 0;
+ if (vectorized &&
+ rte_kvargs_count(kvlist, VIRTIO_ARG_VECTORIZED) == 1) {
+ ret = rte_kvargs_process(kvlist,
+ VIRTIO_ARG_VECTORIZED,
+ vectorized_check_handler, vectorized);
+ if (ret < 0) {
+ PMD_INIT_LOG(ERR, "Failed to parse %s",
+ VIRTIO_ARG_VECTORIZED);
+ goto exit;
+ }
+ }
+
exit:
rte_kvargs_free(kvlist);
return ret;
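For illustration: given a devargs string containing "vectorized=1",
rte_kvargs_count() reports exactly one match and
vectorized_check_handler() stores 1 through the vectorized pointer; any
other value, e.g. "vectorized=0", stores 0 and the scalar datapaths
remain selected.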
int vdpa = 0;
int ret = 0;
- ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL);
+ ret = virtio_dev_devargs_parse(pci_dev->device.devargs, &vdpa, NULL,
+ NULL);
if (ret < 0) {
PMD_INIT_LOG(ERR, "devargs parsing is failed");
return ret;
- hw->use_simple_rx = 1;
-
if (vtpci_with_feature(hw, VIRTIO_F_IN_ORDER)) {
hw->use_inorder_tx = 1;
hw->use_inorder_rx = 1;
}
if (vtpci_packed_queue(hw)) {
hw->use_inorder_rx = 0;
}
#if defined RTE_ARCH_ARM64 || defined RTE_ARCH_ARM
if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON)) {
}
#endif
if (vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
}
if (rx_offloads & (DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
DEV_RX_OFFLOAD_TCP_LRO |
DEV_RX_OFFLOAD_VLAN_STRIP))
uint8_t vlan_strip;
uint8_t use_msix;
uint8_t modern;
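+	/* Prefer the vectorized Rx/Tx datapaths when set. */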
+ uint8_t use_vec_rx;
+ uint8_t use_vec_tx;
uint8_t use_inorder_rx;
uint8_t use_inorder_tx;
uint8_t weak_barriers;
/* Allocate blank mbufs for the each rx descriptor */
nbufs = 0;
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
for (desc_idx = 0; desc_idx < vq->vq_nentries;
desc_idx++) {
vq->vq_split.ring.avail->ring[desc_idx] = desc_idx;
- if (hw->use_simple_rx) {
+ if (hw->use_vec_rx && !vtpci_packed_queue(hw)) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxvq);
nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
*/
hw->use_msix = 1;
hw->modern = 0;
hw->use_inorder_rx = 0;
hw->use_inorder_tx = 0;
hw->virtio_user_dev = dev;
end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
for (idx = 0; idx < vq->vq_nentries; idx++) {
- if (hw->use_simple_rx && type == VTNET_RQ) {
+ if (hw->use_vec_rx && !vtpci_packed_queue(hw) &&
+ type == VTNET_RQ) {
if (start <= end && idx >= start && idx < end)
continue;
if (start > end && (idx >= start || idx < end))
for (i = 0; i < nb_used; i++) {
used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
uep = &vq->vq_split.ring.used->ring[used_idx];
-		if (hw->use_simple_rx) {
+		if (hw->use_vec_rx) {
desc_idx = used_idx;
rte_pktmbuf_free(vq->sw_ring[desc_idx]);
vq->vq_free_cnt++;
vq->vq_used_cons_idx++;
}
-	if (hw->use_simple_rx) {
+	if (hw->use_vec_rx) {
while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
virtio_rxq_rearm_vec(rxq);
if (virtqueue_kick_prepare(vq))