X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fvmxnet3%2Fvmxnet3_rxtx.c;h=7794d74214b774edd6aa9f8ba0a3f4103704b5bf;hb=e0c103a79c0e9d72cf78e3643a85b6869599f79d;hp=a6a69df22acf9c958714003c5de530f532eb0ccf;hpb=24ac604ef7469eb5773c2504b313dd00257f8df3;p=dpdk.git

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index a6a69df22a..7794d74214 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -361,7 +361,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
 				m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
@@ -369,20 +369,20 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
 				(ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_SCTP_CKSUM) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -541,10 +541,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 			switch (txm->ol_flags & PKT_TX_L4_MASK) {
 			case PKT_TX_TCP_CKSUM:
-				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+				gdesc->txd.msscof = gdesc->txd.hlen +
+					offsetof(struct rte_tcp_hdr, cksum);
 				break;
 			case PKT_TX_UDP_CKSUM:
-				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+				gdesc->txd.msscof = gdesc->txd.hlen +
+					offsetof(struct rte_udp_hdr,
+						 dgram_cksum);
 				break;
 			default:
 				PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
@@ -669,7 +672,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
 	uint32_t hlen, slen;
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
-	struct tcp_hdr *tcp_hdr;
+	struct rte_tcp_hdr *tcp_hdr;
 	char *ptr;
 
 	RTE_ASSERT(rcd->tcp);
@@ -681,7 +684,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
 	if (rcd->v4) {
 		if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
 			return hw->mtu - sizeof(struct rte_ipv4_hdr)
-					- sizeof(struct tcp_hdr);
+					- sizeof(struct rte_tcp_hdr);
 
 		ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
 		hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
@@ -689,7 +692,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
 	} else if (rcd->v6) {
 		if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
 			return hw->mtu - sizeof(struct rte_ipv6_hdr) -
-					sizeof(struct tcp_hdr);
+					sizeof(struct rte_tcp_hdr);
 
 		ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
 		hlen += sizeof(struct rte_ipv6_hdr);
@@ -701,11 +704,11 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
 		}
 	}
 
-	if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
-		return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+	if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr)))
+		return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) +
 				sizeof(struct rte_ether_hdr);
 
-	tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+	tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
 	hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
 	if (rxm->udata64 > 1)
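
Note on the rte_errno convention enforced by the hunks above: rte_errno always holds a positive errno value, whereas rte_validate_tx_offload() and rte_net_intel_cksum_prepare() return 0 on success or a negative errno on failure, so their result must be negated before the assignment. The sketch below is not part of the patch; it is a minimal, hypothetical tx_prepare-style loop (example_prep_pkts() is an invented name) illustrating that convention, assuming the DPDK APIs and PKT_TX_* flag names of this era.

#include <errno.h>
#include <stdint.h>

#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_net.h>

/* Hypothetical tx_prepare-style callback body, for illustration only. */
static uint16_t
example_prep_pkts(struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		struct rte_mbuf *m = tx_pkts[i];
		int ret;

		/* Unsupported offload request: set a positive errno directly. */
		if ((m->ol_flags & PKT_TX_L4_MASK) == PKT_TX_SCTP_CKSUM) {
			rte_errno = ENOTSUP;
			return i;
		}

		/* Helper returns 0 or a negative errno: negate it on failure. */
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}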