+/**
+ * Tx burst prepare callback (eth_tx_prep_t) for the virtio PMD.
+ *
+ * Walks the burst and, per mbuf: optionally validates the requested Tx
+ * offloads (debug builds only), software-inserts a VLAN tag when
+ * PKT_TX_VLAN_PKT is set, applies the Intel-style checksum pseudo-header
+ * fixup, and patches the TCP checksum for TSO segments.
+ *
+ * @param tx_queue  Unused (offload preparation needs no queue state).
+ * @param tx_pkts   Array of mbufs to prepare; entries may be replaced in
+ *                  place by rte_vlan_insert().
+ * @param nb_pkts   Number of mbufs in @tx_pkts.
+ *
+ * @return Number of mbufs successfully prepared. On the first failure,
+ *         rte_errno is set to the positive errno value and the count of
+ *         packets prepared so far is returned (standard tx_prepare
+ *         contract: the caller may transmit only that prefix).
+ */
+uint16_t
+virtio_xmit_pkts_prepare(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	uint16_t nb_tx;
+	int error;
+
+	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
+		struct rte_mbuf *m = tx_pkts[nb_tx];
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		/* Sanity-check ol_flags/offload fields; debug builds only
+		 * to keep the fast path lean in production.
+		 */
+		error = rte_validate_tx_offload(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+#endif
+
+		/* Do VLAN tag insertion */
+		if (unlikely(m->ol_flags & PKT_TX_VLAN_PKT)) {
+			error = rte_vlan_insert(&m);
+			/* rte_vlan_insert() may change pointer
+			 * even in the case of failure
+			 */
+			tx_pkts[nb_tx] = m;
+
+			if (unlikely(error)) {
+				rte_errno = -error;
+				break;
+			}
+		}
+
+		/* Prepare pseudo-header checksums the device expects
+		 * (same helper the Intel PMDs use).
+		 */
+		error = rte_net_intel_cksum_prepare(m);
+		if (unlikely(error)) {
+			rte_errno = -error;
+			break;
+		}
+
+		/* TSO: virtio needs the TCP checksum seeded differently;
+		 * fix it up before handing the segment to the device.
+		 */
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			virtio_tso_fix_cksum(m);
+	}
+
+	return nb_tx;
+}
+