+
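+ /* Set the Tx prepare handler and choose the Tx burst function based on
+  * ring layout (packed vs. split), vectorization and in-order support.
+  */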
+ eth_dev->tx_pkt_prepare = virtio_xmit_pkts_prepare;
+ if (virtio_with_packed_queue(hw)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring %s Tx path on port %u",
+ hw->use_vec_tx ? "vectorized" : "standard",
+ eth_dev->data->port_id);
+ if (hw->use_vec_tx)
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed_vec;
+ else
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_packed;
+ } else {
+ if (hw->use_inorder_tx) {
+ PMD_INIT_LOG(INFO, "virtio: using inorder Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts_inorder;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Tx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->tx_pkt_burst = virtio_xmit_pkts;
+ }
+ }
+
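+ /* Choose the Rx burst function: packed vs. split ring first, then the
+  * vectorized, in-order or mergeable-buffer variant where enabled or
+  * negotiated.
+  */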
+ if (virtio_with_packed_queue(hw)) {
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring vectorized Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_pkts_packed_vec;
+ } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst =
+ &virtio_recv_mergeable_pkts_packed;
+ } else {
+ PMD_INIT_LOG(INFO,
+ "virtio: using packed ring standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_packed;
+ }
+ } else {
+ if (hw->use_vec_rx) {
+ PMD_INIT_LOG(INFO, "virtio: using vectorized Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = virtio_recv_pkts_vec;
+ } else if (hw->use_inorder_rx) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using inorder Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts_inorder;
+ } else if (virtio_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
+ PMD_INIT_LOG(INFO,
+ "virtio: using mergeable buffer Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_mergeable_pkts;
+ } else {
+ PMD_INIT_LOG(INFO, "virtio: using standard Rx path on port %u",
+ eth_dev->data->port_id);
+ eth_dev->rx_pkt_burst = &virtio_recv_pkts;
+ }
+ }
+}
+
+/* Only support 1:1 queue/interrupt mapping so far.
+ * TODO: support n:1 queue/interrupt mapping when there is a limited number of
+ * interrupt vectors (< N + 1).
+ */
+static int
+virtio_queues_bind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt binding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i) {
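+ /* Vector 0 is reserved for device config/link-state interrupts, so Rx
+  * queue i is bound to MSI-X vector i + 1.
+  */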
+ dev->intr_handle->intr_vec[i] = i + 1;
+ if (VIRTIO_OPS(hw)->set_queue_irq(hw, hw->vqs[i * VTNET_CQ], i + 1) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ PMD_DRV_LOG(ERR, "failed to set queue vector");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static void
+virtio_queues_unbind_intr(struct rte_eth_dev *dev)
+{
+ uint32_t i;
+ struct virtio_hw *hw = dev->data->dev_private;
+
+ PMD_INIT_LOG(INFO, "queue/interrupt unbinding");
+ for (i = 0; i < dev->data->nb_rx_queues; ++i)
+ VIRTIO_OPS(hw)->set_queue_irq(hw,
+ hw->vqs[i * VTNET_CQ],
+ VIRTIO_MSI_NO_VECTOR);