rte_pci_register(&rte_virtio_pmd);
}
+static bool
+rx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
+}
+
+static bool
+tx_offload_enabled(struct virtio_hw *hw)
+{
+ return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
+ vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
+}
+
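Both new helpers test bits of the feature set negotiated with the host via vtpci_with_feature(). That helper lives in virtio_pci.h and is not part of this excerpt; it behaves roughly like the sketch below (the guest_features member name is an assumption here, not taken from this patch):

static inline int
vtpci_with_feature(struct virtio_hw *hw, uint64_t bit)
{
	/* Test one bit of the negotiated feature bitmask (assumed field name). */
	return (hw->guest_features & (1ULL << bit)) != 0;
}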
/*
* Configure virtio device
* It returns 0 on success.
return -ENOTSUP;
}
+ hw->has_tx_offload = tx_offload_enabled(hw);
+ hw->has_rx_offload = rx_offload_enabled(hw);
+
if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)
/* Enable vector (0) for Link State Interrupt */
if (VTPCI_OPS(hw)->set_config_irq(hw, 0) ==
}
}
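The two assignments in virtio_dev_configure() above require matching members in struct virtio_hw; that header hunk is not shown in this excerpt. A minimal sketch, assuming the fields are added to the structure in virtio_pci.h:

	/* Assumed additions to struct virtio_hw: cached at configure time so
	 * the Rx/Tx hot paths read a flag instead of re-testing feature bits
	 * for every burst. */
	bool has_tx_offload;
	bool has_rx_offload;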
-static inline int
-tx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
-}
/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
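The macro body is elided in this excerpt. Consistent with the comment above, a write-avoiding assignment would look roughly like this sketch (not the verbatim definition):

#define ASSIGN_UNLESS_EQUAL(var, val) do { \
	if ((var) != (val)) \
		(var) = (val); \
} while (0)

Skipping the store when the value is already correct keeps clean cache lines from being dirtied by the virtio-net header writes in the Tx path.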
static inline void
virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
struct rte_mbuf *cookie,
- int offload)
+ bool offload)
{
if (offload) {
if (cookie->ol_flags & PKT_TX_TCP_SEG)
struct virtio_net_hdr *hdr;
uint16_t idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
- int offload;
uint16_t i = 0;
idx = vq->vq_desc_head_idx;
start_dp = vq->vq_ring.desc;
- offload = tx_offload_enabled(vq->hw);
-
while (i < num) {
idx = idx & (vq->vq_nentries - 1);
dxp = &vq->vq_descx[idx];
cookies[i]->pkt_len -= head_size;
/* if offload disabled, it is not zeroed below, do it now */
- if (offload == 0) {
+ if (!vq->hw->has_tx_offload) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
}
- virtqueue_xmit_offload(hdr, cookies[i], offload);
+ virtqueue_xmit_offload(hdr, cookies[i],
+ vq->hw->has_tx_offload);
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
start_dp[idx].len = cookies[i]->data_len;
uint16_t head_idx, idx;
uint16_t head_size = vq->hw->vtnet_hdr_size;
struct virtio_net_hdr *hdr;
- int offload;
-
- offload = tx_offload_enabled(vq->hw);
head_idx = vq->vq_desc_head_idx;
idx = head_idx;
cookie->pkt_len -= head_size;
/* if offload disabled, it is not zeroed below, do it now */
- if (offload == 0) {
+ if (!vq->hw->has_tx_offload) {
ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
idx = start_dp[idx].next;
}
- virtqueue_xmit_offload(hdr, cookie, offload);
+ virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
do {
start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
return 0;
}
-static inline int
-rx_offload_enabled(struct virtio_hw *hw)
-{
- return vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO6);
-}
-
#define VIRTIO_MBUF_BURST_SZ 64
#define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
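For a sense of scale: with the common 64-byte RTE_CACHE_LINE_SIZE and the 16-byte split-ring struct vring_desc (8-byte addr, 4-byte len, 2-byte flags, 2-byte next), DESC_PER_CACHELINE evaluates to 4; platforms with 128-byte cache lines get 8.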
uint16_t
int error;
uint32_t i, nb_enqueued;
uint32_t hdr_size;
- int offload;
struct virtio_net_hdr *hdr;
nb_rx = 0;
nb_enqueued = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
for (i = 0; i < num ; i++) {
rxm = rcv_pkts[i];
if (hw->vlan_strip)
rte_vlan_strip(rxm);
- if (offload && virtio_rx_offload(rxm, hdr) < 0) {
+ if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;
uint32_t seg_res;
uint32_t hdr_size;
int32_t i;
- int offload;
nb_rx = 0;
if (unlikely(hw->started == 0))
seg_num = 1;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
rx_pkts[nb_rx] = rxm;
prev = rxm;
- if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ if (vq->hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
virtio_discard_rxbuf_inorder(vq, rxm);
rxvq->stats.errors++;
continue;
uint16_t extra_idx;
uint32_t seg_res;
uint32_t hdr_size;
- int offload;
nb_rx = 0;
if (unlikely(hw->started == 0))
extra_idx = 0;
seg_res = 0;
hdr_size = hw->vtnet_hdr_size;
- offload = rx_offload_enabled(hw);
while (i < nb_used) {
struct virtio_net_hdr_mrg_rxbuf *header;
rx_pkts[nb_rx] = rxm;
prev = rxm;
- if (offload && virtio_rx_offload(rxm, &header->hdr) < 0) {
+ if (hw->has_rx_offload &&
+ virtio_rx_offload(rxm, &header->hdr) < 0) {
virtio_discard_rxbuf(vq, rxm);
rxvq->stats.errors++;
continue;