static __rte_always_inline void
virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
{
- uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
+ uint64_t csum_l4 = m_buf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
- if (m_buf->ol_flags & PKT_TX_TCP_SEG)
- csum_l4 |= PKT_TX_TCP_CKSUM;
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
+ csum_l4 |= RTE_MBUF_F_TX_TCP_CKSUM;
if (csum_l4) {
net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
switch (csum_l4) {
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_tcp_hdr,
cksum));
break;
- case PKT_TX_UDP_CKSUM:
+ case RTE_MBUF_F_TX_UDP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_udp_hdr,
dgram_cksum));
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
net_hdr->csum_offset = (offsetof(struct rte_sctp_hdr,
cksum));
break;
}
/* IP cksum verification cannot be bypassed, so calculate it here */
- if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
struct rte_ipv4_hdr *ipv4_hdr;
ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct rte_ipv4_hdr *,
m_buf->l2_len);
ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
}
- if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
- if (m_buf->ol_flags & PKT_TX_IPV4)
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
+ if (m_buf->ol_flags & RTE_MBUF_F_TX_IPV4)
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
else
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
+ m_buf->l4_len;
- } else if (m_buf->ol_flags & PKT_TX_UDP_SEG) {
+ } else if (m_buf->ol_flags & RTE_MBUF_F_TX_UDP_SEG) {
net_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
net_hdr->gso_size = m_buf->tso_segsz;
net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len +
uint16_t vq_size, uint16_t n_inflight)
{
return pkts_idx > n_inflight ? (pkts_idx - n_inflight) :
- (vq_size - n_inflight + pkts_idx) & (vq_size - 1);
+ (vq_size - n_inflight + pkts_idx) % vq_size;
}
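The switch from a power-of-two mask to a plain modulo matters because the mask form only wraps correctly when vq_size is a power of two, which packed virtqueues (unlike split rings) do not guarantee. A minimal illustration of the second branch, using assumed values, where the old mask would land on the wrong slot:

	/* Assumed values for illustration only. */
	uint16_t vq_size = 192, n_inflight = 100, pkts_idx = 20;
	uint16_t idx = vq_size - n_inflight + pkts_idx;	/* 112 */

	/* idx % vq_size       == 112            (intended slot) */
	/* idx & (vq_size - 1) == 112 & 191 == 48 (wrong slot)   */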
static __rte_always_inline void
struct async_inflight_info *pkts_info = vq->async_pkts_info;
uint32_t n_pkts = 0, pkt_err = 0;
uint32_t num_async_pkts = 0, num_done_pkts = 0;
+ int32_t n_xfer;
struct {
uint16_t pkt_idx;
uint16_t last_avail_idx;
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await <
BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
}
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid,
- queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
uint16_t async_descs_idx = 0;
uint16_t num_buffers;
uint16_t num_descs;
+ int32_t n_xfer;
struct rte_vhost_iov_iter *it_pool = vq->it_pool;
struct iovec *vec_pool = vq->vec_pool;
*/
if (unlikely(pkt_burst_idx >= VHOST_ASYNC_BATCH_THRESHOLD ||
((VHOST_MAX_ASYNC_VEC >> 1) - segs_await < BUF_VECTOR_MAX))) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id,
- tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid,
+ queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
iovec_idx = 0;
it_idx = 0;
segs_await = 0;
} while (pkt_idx < count);
if (pkt_burst_idx) {
- n_pkts = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ n_xfer = vq->async_ops.transfer_data(dev->vid, queue_id, tdes, 0, pkt_burst_idx);
+ if (n_xfer >= 0) {
+ n_pkts = n_xfer;
+ } else {
+ VHOST_LOG_DATA(ERR, "(%d) %s: failed to transfer data for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts = 0;
+ }
+
vq->async_pkts_inflight_n += n_pkts;
if (unlikely(n_pkts < pkt_burst_idx))
} while (nr_left > 0);
}
-uint16_t rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+static __rte_always_inline uint16_t
+vhost_poll_enqueue_completed(struct virtio_net *dev, uint16_t queue_id,
struct rte_mbuf **pkts, uint16_t count)
{
- struct virtio_net *dev = get_device(vid);
struct vhost_virtqueue *vq;
uint16_t n_pkts_cpl = 0, n_pkts_put = 0, n_descs = 0, n_buffers = 0;
uint16_t start_idx, pkts_idx, vq_size;
struct async_inflight_info *pkts_info;
uint16_t from, i;
-
- if (!dev)
- return 0;
-
- VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
- if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
- VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
+ int32_t n_cpl;
vq = dev->virtqueue[queue_id];
- if (unlikely(!vq->async_registered)) {
- VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
- dev->vid, __func__, queue_id);
- return 0;
- }
-
- rte_spinlock_lock(&vq->access_lock);
-
pkts_idx = vq->async_pkts_idx % vq->size;
pkts_info = vq->async_pkts_info;
vq_size = vq->size;
start_idx = virtio_dev_rx_async_get_info_idx(pkts_idx,
vq_size, vq->async_pkts_inflight_n);
- if (count > vq->async_last_pkts_n)
- n_pkts_cpl = vq->async_ops.check_completed_copies(vid,
+ if (count > vq->async_last_pkts_n) {
+ n_cpl = vq->async_ops.check_completed_copies(dev->vid,
queue_id, 0, count - vq->async_last_pkts_n);
+ if (n_cpl >= 0) {
+ n_pkts_cpl = n_cpl;
+ } else {
+ VHOST_LOG_DATA(ERR,
+ "(%d) %s: failed to check completed copies for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ n_pkts_cpl = 0;
+ }
+ }
n_pkts_cpl += vq->async_last_pkts_n;
n_pkts_put = RTE_MIN(count, n_pkts_cpl);
if (unlikely(n_pkts_put == 0)) {
vq->async_last_pkts_n = n_pkts_cpl;
- goto done;
+ return 0;
}
if (vq_is_packed(dev)) {
for (i = 0; i < n_pkts_put; i++) {
- from = (start_idx + i) & (vq_size - 1);
+ from = (start_idx + i) % vq_size;
n_buffers += pkts_info[from].nr_buffers;
pkts[i] = pkts_info[from].mbuf;
}
}
}
-done:
+ return n_pkts_put;
+}
+
+uint16_t
+rte_vhost_poll_enqueue_completed(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ rte_spinlock_lock(&vq->access_lock);
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
rte_spinlock_unlock(&vq->access_lock);
- return n_pkts_put;
+ return n_pkts_cpl;
+}
+
+uint16_t
+rte_vhost_clear_queue_thread_unsafe(int vid, uint16_t queue_id,
+ struct rte_mbuf **pkts, uint16_t count)
+{
+ struct virtio_net *dev = get_device(vid);
+ struct vhost_virtqueue *vq;
+ uint16_t n_pkts_cpl = 0;
+
+ if (!dev)
+ return 0;
+
+ VHOST_LOG_DATA(DEBUG, "(%d) %s\n", dev->vid, __func__);
+ if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: invalid virtqueue idx %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ vq = dev->virtqueue[queue_id];
+
+ if (unlikely(!vq->async_registered)) {
+ VHOST_LOG_DATA(ERR, "(%d) %s: async not registered for queue id %d.\n",
+ dev->vid, __func__, queue_id);
+ return 0;
+ }
+
+ n_pkts_cpl = vhost_poll_enqueue_completed(dev, queue_id, pkts, count);
+
+ return n_pkts_cpl;
}
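rte_vhost_clear_queue_thread_unsafe() performs the same completion polling as rte_vhost_poll_enqueue_completed(), but without taking vq->access_lock, so the caller must ensure it does not race the enqueue path. A minimal usage sketch, assuming hypothetical vid, queue_id and MAX_BURST values and a caller that already serializes access to the queue:

	/* Hypothetical draining loop, run from the same lcore that performs
	 * the async enqueues so no extra locking is needed. Completed mbufs
	 * are returned to the application, which frees them here. */
	struct rte_mbuf *done[MAX_BURST];
	uint16_t n;

	do {
		n = rte_vhost_clear_queue_thread_unsafe(vid, queue_id,
				done, MAX_BURST);
		rte_pktmbuf_free_bulk(done, n);
	} while (n == MAX_BURST);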
static __rte_always_inline uint32_t
m->l3_len = rte_ipv4_hdr_len(ipv4_hdr);
if (data_len < m->l2_len + m->l3_len)
goto error;
- m->ol_flags |= PKT_TX_IPV4;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV4;
*l4_proto = ipv4_hdr->next_proto_id;
break;
case RTE_ETHER_TYPE_IPV6:
ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct rte_ipv6_hdr *,
m->l2_len);
m->l3_len = sizeof(struct rte_ipv6_hdr);
- m->ol_flags |= PKT_TX_IPV6;
+ m->ol_flags |= RTE_MBUF_F_TX_IPV6;
*l4_proto = ipv6_hdr->proto;
break;
default:
case (offsetof(struct rte_tcp_hdr, cksum)):
if (l4_proto != IPPROTO_TCP)
goto error;
- m->ol_flags |= PKT_TX_TCP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_CKSUM;
break;
case (offsetof(struct rte_udp_hdr, dgram_cksum)):
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_CKSUM;
break;
case (offsetof(struct rte_sctp_hdr, cksum)):
if (l4_proto != IPPROTO_SCTP)
goto error;
- m->ol_flags |= PKT_TX_SCTP_CKSUM;
+ m->ol_flags |= RTE_MBUF_F_TX_SCTP_CKSUM;
break;
default:
goto error;
tcp_len = (tcp_hdr->data_off & 0xf0) >> 2;
if (data_len < m->l2_len + m->l3_len + tcp_len)
goto error;
- m->ol_flags |= PKT_TX_TCP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_TCP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = tcp_len;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if (l4_proto != IPPROTO_UDP)
goto error;
- m->ol_flags |= PKT_TX_UDP_SEG;
+ m->ol_flags |= RTE_MBUF_F_TX_UDP_SEG;
m->tso_segsz = hdr->gso_size;
m->l4_len = sizeof(struct rte_udp_hdr);
break;
return;
}
- m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
+ m->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
m->packet_type = ptype;
hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
if (hdr->csum_start <= hdrlen && l4_supported != 0) {
- m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_NONE;
} else {
/* Unknown proto or tunnel, do sw cksum. We can assume
* the cksum field is in the first segment since the
case VIRTIO_NET_HDR_GSO_TCPV6:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_TCP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
case VIRTIO_NET_HDR_GSO_UDP:
if ((ptype & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
break;
- m->ol_flags |= PKT_RX_LRO | PKT_RX_L4_CKSUM_NONE;
+ m->ol_flags |= RTE_MBUF_F_RX_LRO | RTE_MBUF_F_RX_L4_CKSUM_NONE;
m->tso_segsz = hdr->gso_size;
break;
default: