X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fvmxnet3%2Fvmxnet3_rxtx.c;h=dd99684bee4d5d1dbd91bb3a2b5914b954beb28b;hb=5908712aa5bfedc5d2d1d18df46e8673794882af;hp=57609023960c5c50dd90d219d4791407afafd371;hpb=a7c528e5d71ff3f569898d268f9de129fdfc152b;p=dpdk.git

diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 5760902396..dd99684bee 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -361,7 +361,7 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		 */
 		if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
 				m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
@@ -369,20 +369,20 @@ vmxnet3_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 		if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
 				(ol_flags & PKT_TX_L4_MASK) ==
 				PKT_TX_SCTP_CKSUM) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
 			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
@@ -541,10 +541,13 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 			switch (txm->ol_flags & PKT_TX_L4_MASK) {
 			case PKT_TX_TCP_CKSUM:
-				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct tcp_hdr, cksum);
+				gdesc->txd.msscof = gdesc->txd.hlen +
+					offsetof(struct rte_tcp_hdr, cksum);
 				break;
 			case PKT_TX_UDP_CKSUM:
-				gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct udp_hdr, dgram_cksum);
+				gdesc->txd.msscof = gdesc->txd.hlen +
+					offsetof(struct rte_udp_hdr,
+						 dgram_cksum);
 				break;
 			default:
 				PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
@@ -669,7 +672,7 @@ vmxnet3_guess_mss(struct vmxnet3_hw *hw, const Vmxnet3_RxCompDesc *rcd,
 	uint32_t hlen, slen;
 	struct rte_ipv4_hdr *ipv4_hdr;
 	struct rte_ipv6_hdr *ipv6_hdr;
-	struct tcp_hdr *tcp_hdr;
+	struct rte_tcp_hdr *tcp_hdr;
 	char *ptr;
 
 	RTE_ASSERT(rcd->tcp);
@@ -681,15 +684,15 @@
 	if (rcd->v4) {
 		if (unlikely(slen < hlen + sizeof(struct rte_ipv4_hdr)))
 			return hw->mtu - sizeof(struct rte_ipv4_hdr)
-				- sizeof(struct tcp_hdr);
+				- sizeof(struct rte_tcp_hdr);
 
 		ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
-		hlen += (ipv4_hdr->version_ihl & IPV4_HDR_IHL_MASK) *
-			IPV4_IHL_MULTIPLIER;
+		hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
+			RTE_IPV4_IHL_MULTIPLIER;
 	} else if (rcd->v6) {
 		if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
 			return hw->mtu - sizeof(struct rte_ipv6_hdr) -
-				sizeof(struct tcp_hdr);
+				sizeof(struct rte_tcp_hdr);
 
 		ipv6_hdr = (struct rte_ipv6_hdr *)(ptr + hlen);
 		hlen += sizeof(struct rte_ipv6_hdr);
@@ -701,11 +704,11 @@
 		}
 	}
 
-	if (unlikely(slen < hlen + sizeof(struct tcp_hdr)))
-		return hw->mtu - hlen - sizeof(struct tcp_hdr) +
+	if (unlikely(slen < hlen + sizeof(struct rte_tcp_hdr)))
+		return hw->mtu - hlen - sizeof(struct rte_tcp_hdr) +
 			sizeof(struct rte_ether_hdr);
 
-	tcp_hdr = (struct tcp_hdr *)(ptr + hlen);
+	tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
 	hlen += (tcp_hdr->data_off & 0xf0) >> 2;
 
 	if (rxm->udata64 > 1)
@@ -1308,6 +1311,14 @@ vmxnet3_v4_rss_configure(struct rte_eth_dev *dev)
 	cmdInfo->setRSSFields = 0;
 	port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+	if ((port_rss_conf->rss_hf & VMXNET3_MANDATORY_V4_RSS) !=
+	    VMXNET3_MANDATORY_V4_RSS) {
+		PMD_INIT_LOG(WARNING, "RSS: IPv4/6 TCP is required for vmxnet3 v4 RSS,"
+			     "automatically setting it");
+		port_rss_conf->rss_hf |= VMXNET3_MANDATORY_V4_RSS;
+	}
+
 	rss_hf = port_rss_conf->rss_hf &
 		(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);