net: add rte prefix to IP structure

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index ee30084..b4c54c1 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -479,13 +479,14 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
        /* common case: header is not fragmented */
        if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
                        m->l4_len)) {
-               struct ipv4_hdr *iph;
-               struct ipv6_hdr *ip6h;
+               struct rte_ipv4_hdr *iph;
+               struct rte_ipv6_hdr *ip6h;
                struct tcp_hdr *th;
                uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
                uint32_t tmp;
 
-               iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
+               iph = rte_pktmbuf_mtod_offset(m,
+                                       struct rte_ipv4_hdr *, m->l2_len);
                th = RTE_PTR_ADD(iph, m->l3_len);
                if ((iph->version_ihl >> 4) == 4) {
                        iph->hdr_checksum = 0;
@@ -494,7 +495,7 @@ virtio_tso_fix_cksum(struct rte_mbuf *m)
                        ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
                                m->l3_len);
                } else {
-                       ip6h = (struct ipv6_hdr *)iph;
+                       ip6h = (struct rte_ipv6_hdr *)iph;
                        ip_paylen = ip6h->payload_len;
                }
 
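The renamed structures feed the pseudo-header checksum fix for TSO: IPv4 carries the full datagram length in total_length, so the L3 header length must be subtracted, while IPv6's payload_len already excludes the fixed header. A minimal standalone sketch of that derivation, assuming the rte_ip.h field layout; the helper name is invented for illustration:

#include <rte_ip.h>
#include <rte_byteorder.h>

/* Hypothetical helper: big-endian L4 payload length for v4 or v6,
 * mirroring the branch in virtio_tso_fix_cksum() above. */
static inline uint16_t
l4_payload_len_be(void *l3_hdr, uint16_t l3_len)
{
	struct rte_ipv4_hdr *iph = l3_hdr;

	if ((iph->version_ihl >> 4) == 4) {
		/* IPv4: total_length also covers the IP header itself */
		return rte_cpu_to_be_16(
			rte_be_to_cpu_16(iph->total_length) - l3_len);
	}
	/* IPv6: payload_len already excludes the 40-byte fixed header */
	return ((struct rte_ipv6_hdr *)l3_hdr)->payload_len;
}
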
@@ -1092,7 +1093,7 @@ static inline void
 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
 {
        uint32_t s = mbuf->pkt_len;
-       struct ether_addr *ea;
+       struct rte_ether_addr *ea;
 
        stats->bytes += s;
 
@@ -1109,13 +1110,13 @@ virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
                        stats->size_bins[0]++;
                else if (s < 1519)
                        stats->size_bins[6]++;
-               else if (s >= 1519)
+               else
                        stats->size_bins[7]++;
        }
 
-       ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
-       if (is_multicast_ether_addr(ea)) {
-               if (is_broadcast_ether_addr(ea))
+       ea = rte_pktmbuf_mtod(mbuf, struct rte_ether_addr *);
+       if (rte_is_multicast_ether_addr(ea)) {
+               if (rte_is_broadcast_ether_addr(ea))
                        stats->broadcast++;
                else
                        stats->multicast++;
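
The dropped "else if (s >= 1519)" test was redundant: once "s < 64" and "s < 1519" have both failed, s >= 1519 necessarily holds, so a bare else is equivalent. A standalone sketch of this tail of the size histogram, with an invented helper name:

#include <stdint.h>

/* Hypothetical helper: map a packet size to the outer size bins,
 * showing why the final branch needs no condition. */
static inline unsigned int
outer_size_bin(uint32_t s)
{
	if (s < 64)
		return 0;	/* runt frames */
	else if (s < 1519)
		return 6;	/* up to standard-MTU frames */
	else
		return 7;	/* jumbo frames: all that remains */
}
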
@@ -1244,7 +1245,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-               if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+               if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
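
Every Rx path applies the same sanity check: a completed buffer must hold at least the virtio-net header plus a minimal Ethernet header, and RTE_ETHER_HDR_LEN (two MAC addresses plus the EtherType, 14 bytes) simply replaces the old unprefixed macro. A minimal sketch of the test, with a hypothetical helper name:

#include <stdint.h>
#include <rte_ether.h>

/* Hypothetical helper mirroring the drop test repeated in each Rx path. */
static inline int
virtio_rx_len_ok(uint32_t len, uint32_t hdr_size)
{
	/* hdr_size is the virtio-net header prepended to every frame */
	return len >= hdr_size + RTE_ETHER_HDR_LEN;
}

The identical replacement appears in the split, packed, in-order, and both mergeable receive paths in the hunks that follow.
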
@@ -1347,7 +1348,7 @@ virtio_recv_pkts_packed(void *rx_queue, struct rte_mbuf **rx_pkts,
 
                PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
 
-               if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+               if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
@@ -1461,7 +1462,7 @@ virtio_recv_pkts_inorder(void *rx_queue,
 
                rxm = rcv_pkts[i];
 
-               if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+               if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf_inorder(vq, rxm);
@@ -1653,7 +1654,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
                rxm = rcv_pkts[i];
 
-               if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+               if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
@@ -1832,7 +1833,7 @@ virtio_recv_mergeable_pkts_packed(void *rx_queue,
 
                rxm = rcv_pkts[i];
 
-               if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
+               if (unlikely(len[i] < hdr_size + RTE_ETHER_HDR_LEN)) {
                        PMD_RX_LOG(ERR, "Packet drop");
                        nb_enqueued++;
                        virtio_discard_rxbuf(vq, rxm);
@@ -2003,6 +2004,8 @@ virtio_xmit_pkts_packed(void *tx_queue, struct rte_mbuf **tx_pkts,
                                rte_pktmbuf_free(txm);
                                continue;
                        }
+                       /* vlan_insert may add a header mbuf */
+                       tx_pkts[nb_tx] = txm;
                }
 
                /* optimize ring usage */
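
The added write-back matters because rte_vlan_insert() takes the mbuf by reference and, as the new comment notes, may hand back a different head mbuf; without the assignment, tx_pkts[nb_tx] would keep pointing at the stale head and a later free on that slot would touch the wrong chain. A sketch of the resulting pattern, assuming the PKT_TX_VLAN_PKT offload flag used by the driver at the time; the function name is invented and the descriptor enqueue is elided:

#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_branch_prediction.h>

/* Sketch of the corrected Tx VLAN handling; ring enqueue elided. */
static void
tx_vlan_fixup(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx;

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		struct rte_mbuf *txm = tx_pkts[nb_tx];

		if (txm->ol_flags & PKT_TX_VLAN_PKT) {
			if (unlikely(rte_vlan_insert(&txm) != 0)) {
				rte_pktmbuf_free(txm);
				continue;
			}
			/* vlan_insert may add a header mbuf: resync slot */
			tx_pkts[nb_tx] = txm;
		}
		/* ... hand txm to the ring as before ... */
	}
}

The same two lines are added to virtio_xmit_pkts() and virtio_xmit_pkts_inorder() in the next two hunks.
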
@@ -2090,6 +2093,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                                rte_pktmbuf_free(txm);
                                continue;
                        }
+                       /* vlan_insert may add a header mbuf */
+                       tx_pkts[nb_tx] = txm;
                }
 
                /* optimize ring usage */
@@ -2193,6 +2198,8 @@ virtio_xmit_pkts_inorder(void *tx_queue,
                                rte_pktmbuf_free(txm);
                                continue;
                        }
+                       /* vlan_insert may add a header mbuf */
+                       tx_pkts[nb_tx] = txm;
                }
 
                /* optimize ring usage */