#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"
-#define VMXNET3_TX_OFFLOAD_MASK ( \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG)
+#define VMXNET3_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG)
#define VMXNET3_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ VMXNET3_TX_OFFLOAD_MASK)
static const uint32_t rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2};
}
void
-vmxnet3_dev_tx_queue_release(void *txq)
+vmxnet3_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
- vmxnet3_tx_queue_t *tq = txq;
+ vmxnet3_tx_queue_t *tq = dev->data->tx_queues[qid];
if (tq != NULL) {
/* Release mbufs */
}
void
-vmxnet3_dev_rx_queue_release(void *rxq)
+vmxnet3_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
int i;
- vmxnet3_rx_queue_t *rq = rxq;
+ vmxnet3_rx_queue_t *rq = dev->data->rx_queues[qid];
if (rq != NULL) {
/* Release mbufs */
}
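Note: with this ethdev API, the release callbacks receive the port and queue index and look the queue up in dev->data themselves. As a minimal sketch (not part of this patch; the real vmxnet3 ops table defines many more callbacks), they are wired into the driver like this:

    /* Sketch only: how the new-style release callbacks plug into eth_dev_ops.
     * All other callback fields of the real vmxnet3 ops table are omitted. */
    static const struct eth_dev_ops vmxnet3_eth_dev_ops_sketch = {
        .rx_queue_release = vmxnet3_dev_rx_queue_release,
        .tx_queue_release = vmxnet3_dev_tx_queue_release,
    };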
PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.", completed);
+
+ /* To avoid compiler warnings when not in DEBUG mode. */
+ RTE_SET_USED(completed);
}
uint16_t
/* Non-TSO packet cannot occupy more than
* VMXNET3_MAX_TXD_PER_PKT TX descriptors.
*/
- if ((ol_flags & PKT_TX_TCP_SEG) == 0 &&
+ if ((ol_flags & RTE_MBUF_F_TX_TCP_SEG) == 0 &&
m->nb_segs > VMXNET3_MAX_TXD_PER_PKT) {
- rte_errno = -EINVAL;
+ rte_errno = EINVAL;
return i;
}
/* check that only supported TX offloads are requested. */
if ((ol_flags & VMXNET3_TX_OFFLOAD_NOTSUP_MASK) != 0 ||
- (ol_flags & PKT_TX_L4_MASK) ==
- PKT_TX_SCTP_CKSUM) {
- rte_errno = -ENOTSUP;
+ (ol_flags & RTE_MBUF_F_TX_L4_MASK) ==
+ RTE_MBUF_F_TX_SCTP_CKSUM) {
+ rte_errno = ENOTSUP;
return i;
}
#ifdef RTE_LIBRTE_ETHDEV_DEBUG
ret = rte_validate_tx_offload(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
#endif
ret = rte_net_intel_cksum_prepare(m);
if (ret != 0) {
- rte_errno = ret;
+ rte_errno = -ret;
return i;
}
}
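The rte_errno fixes in this hunk mean a caller of the prepare stage sees a positive errno whenever the burst is cut short. A minimal caller-side sketch (port_id, queue_id, pkts and nb are placeholder names, not taken from this patch):

    #include <stdio.h>
    #include <rte_ethdev.h>
    #include <rte_errno.h>

    /* Sketch: run a burst through tx_prepare before tx_burst and report a
     * positive rte_errno if preparation stopped early. */
    static uint16_t
    send_prepared_burst(uint16_t port_id, uint16_t queue_id,
                        struct rte_mbuf **pkts, uint16_t nb)
    {
        uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);

        if (nb_prep != nb)
            printf("tx_prepare stopped at %u of %u: %s\n",
                   nb_prep, nb, rte_strerror(rte_errno));

        return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
    }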
struct rte_mbuf *txm = tx_pkts[nb_tx];
struct rte_mbuf *m_seg = txm;
int copy_size = 0;
- bool tso = (txm->ol_flags & PKT_TX_TCP_SEG) != 0;
+ bool tso = (txm->ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0;
/* # of descriptors needed for a packet. */
unsigned count = txm->nb_segs;
/* Add VLAN tag if present */
gdesc = txq->cmd_ring.base + first2fill;
- if (txm->ol_flags & PKT_TX_VLAN_PKT) {
+ if (txm->ol_flags & RTE_MBUF_F_TX_VLAN) {
gdesc->txd.ti = 1;
gdesc->txd.tci = txm->vlan_tci;
}
gdesc->txd.msscof = mss;
deferred += (rte_pktmbuf_pkt_len(txm) - gdesc->txd.hlen + mss - 1) / mss;
- } else if (txm->ol_flags & PKT_TX_L4_MASK) {
+ } else if (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
gdesc->txd.om = VMXNET3_OM_CSUM;
gdesc->txd.hlen = txm->l2_len + txm->l3_len;
- switch (txm->ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_TCP_CKSUM:
+ switch (txm->ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_TCP_CKSUM:
gdesc->txd.msscof = gdesc->txd.hlen +
offsetof(struct rte_tcp_hdr, cksum);
break;
- case PKT_TX_UDP_CKSUM:
- gdesc->txd.msscof = gdesc->txd.hlen + offsetof(struct rte_udp_hdr, dgram_cksum);
+ case RTE_MBUF_F_TX_UDP_CKSUM:
+ gdesc->txd.msscof = gdesc->txd.hlen +
+ offsetof(struct rte_udp_hdr,
+ dgram_cksum);
break;
default:
PMD_TX_LOG(WARNING, "requested cksum offload not supported %#llx",
- txm->ol_flags & PKT_TX_L4_MASK);
+ txm->ol_flags & RTE_MBUF_F_TX_L4_MASK);
abort();
}
deferred++;
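The hlen/msscof programming above consumes the standard mbuf Tx offload metadata supplied by the sender. A minimal sketch of what a caller sets to request TCP checksum offload with the renamed flags (a plain Ethernet/IPv4 frame without IP options is an assumption of the example; IPv4 header checksum offload is not requested because it is not in VMXNET3_TX_OFFLOAD_MASK):

    #include <rte_mbuf.h>
    #include <rte_ether.h>
    #include <rte_ip.h>

    /* Sketch: mark an already-built IPv4/TCP packet for L4 checksum offload. */
    static void
    request_tcp_cksum_offload(struct rte_mbuf *m)
    {
        m->l2_len = sizeof(struct rte_ether_hdr);
        m->l3_len = sizeof(struct rte_ipv4_hdr);    /* assumes no IP options */
        m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_TCP_CKSUM;
    }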
struct rte_ipv6_hdr *ipv6_hdr;
struct rte_tcp_hdr *tcp_hdr;
char *ptr;
+ uint8_t segs;
RTE_ASSERT(rcd->tcp);
- sizeof(struct rte_tcp_hdr);
ipv4_hdr = (struct rte_ipv4_hdr *)(ptr + hlen);
- hlen += (ipv4_hdr->version_ihl & RTE_IPV4_HDR_IHL_MASK) *
- RTE_IPV4_IHL_MULTIPLIER;
+ hlen += rte_ipv4_hdr_len(ipv4_hdr);
} else if (rcd->v6) {
if (unlikely(slen < hlen + sizeof(struct rte_ipv6_hdr)))
return hw->mtu - sizeof(struct rte_ipv6_hdr) -
tcp_hdr = (struct rte_tcp_hdr *)(ptr + hlen);
hlen += (tcp_hdr->data_off & 0xf0) >> 2;
- if (rxm->udata64 > 1)
- return (rte_pktmbuf_pkt_len(rxm) - hlen +
- rxm->udata64 - 1) / rxm->udata64;
+ segs = *vmxnet3_segs_dynfield(rxm);
+ if (segs > 1)
+ return (rte_pktmbuf_pkt_len(rxm) - hlen + segs - 1) / segs;
else
return hw->mtu - hlen + sizeof(struct rte_ether_hdr);
}
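These hunks move the received segment count out of the deprecated udata64 field and into a driver-private dynamic mbuf field, read here via vmxnet3_segs_dynfield() and written in the LRO hunk below. A rough sketch of how such an accessor is typically declared and registered with the mbuf dynamic-field API (the field name and helper names are assumptions, not taken from this patch):

    #include <rte_mbuf.h>
    #include <rte_mbuf_dyn.h>
    #include <rte_errno.h>

    /* Offset of the per-mbuf segment-count field; filled in at registration. */
    int vmxnet3_segs_dynfield_offset = -1;

    /* Sketch: accessor equivalent to the vmxnet3_segs_dynfield() used above. */
    static inline uint8_t *
    vmxnet3_segs_dynfield(struct rte_mbuf *mbuf)
    {
        return RTE_MBUF_DYNFIELD(mbuf, vmxnet3_segs_dynfield_offset, uint8_t *);
    }

    /* Sketch: register the field once, e.g. from the device init path. */
    static int
    vmxnet3_register_segs_dynfield(void)
    {
        static const struct rte_mbuf_dynfield desc = {
            .name = "vmxnet3_rx_segs",    /* illustrative name */
            .size = sizeof(uint8_t),
            .align = __alignof__(uint8_t),
        };

        vmxnet3_segs_dynfield_offset = rte_mbuf_dynfield_register(&desc);
        return vmxnet3_segs_dynfield_offset < 0 ? -rte_errno : 0;
    }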
(const Vmxnet3_RxCompDescExt *)rcd;
rxm->tso_segsz = rcde->mss;
- rxm->udata64 = rcde->segCnt;
- ol_flags |= PKT_RX_LRO;
+ *vmxnet3_segs_dynfield(rxm) = rcde->segCnt;
+ ol_flags |= RTE_MBUF_F_RX_LRO;
}
} else { /* Offloads set in eop */
/* Check for RSS */
if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
- ol_flags |= PKT_RX_RSS_HASH;
+ ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
rxm->hash.rss = rcd->rssHash;
}
/* Check for hardware stripped VLAN tag */
if (rcd->ts) {
- ol_flags |= (PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED);
+ ol_flags |= (RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
}
/* Check packet type, checksum errors, etc. */
if (rcd->cnc) {
- ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
} else {
if (rcd->v4) {
packet_type |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
if (rcd->ipc)
- ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
else
- ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
if (rcd->tuc) {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (rcd->tcp)
packet_type |= RTE_PTYPE_L4_TCP;
else
} else {
if (rcd->tcp) {
packet_type |= RTE_PTYPE_L4_TCP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (rcd->udp) {
packet_type |= RTE_PTYPE_L4_UDP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
} else if (rcd->v6) {
packet_type |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
if (rcd->tuc) {
- ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
if (rcd->tcp)
packet_type |= RTE_PTYPE_L4_TCP;
else
} else {
if (rcd->tcp) {
packet_type |= RTE_PTYPE_L4_TCP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
} else if (rcd->udp) {
packet_type |= RTE_PTYPE_L4_UDP;
- ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
}
}
} else {
}
/* Old variants of vmxnet3 do not provide MSS */
- if ((ol_flags & PKT_RX_LRO) && rxm->tso_segsz == 0)
+ if ((ol_flags & RTE_MBUF_F_RX_LRO) && rxm->tso_segsz == 0)
rxm->tso_segsz = vmxnet3_guess_mss(hw,
rcd, rxm);
}
RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
- if (rxm->data_len) {
+ if (likely(start && rxm->data_len > 0)) {
start->pkt_len += rxm->data_len;
start->nb_segs++;
rxq->last_seg->next = rxm;
rxq->last_seg = rxm;
} else {
+ PMD_RX_LOG(ERR, "Error: received an empty or out-of-order frame.");
+ rxq->stats.drop_total++;
+ rxq->stats.drop_err++;
+
rte_pktmbuf_free_seg(rxm);
}
}
cmdInfo->setRSSFields = 0;
port_rss_conf = &dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ if ((port_rss_conf->rss_hf & VMXNET3_MANDATORY_V4_RSS) !=
+ VMXNET3_MANDATORY_V4_RSS) {
+ PMD_INIT_LOG(WARNING, "RSS: IPv4/6 TCP is required for vmxnet3 v4 RSS, "
+ "automatically setting it");
+ port_rss_conf->rss_hf |= VMXNET3_MANDATORY_V4_RSS;
+ }
+
rss_hf = port_rss_conf->rss_hf &
(VMXNET3_V4_RSS_MASK | VMXNET3_RSS_OFFLOAD_ALL);
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_TCPIP6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_UDP)
cmdInfo->setRSSFields |= VMXNET3_RSS_FIELDS_UDPIP6;
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
/* loading hashType */
dev_rss_conf->hashType = 0;
rss_hf = port_rss_conf->rss_hf & VMXNET3_RSS_OFFLOAD_ALL;
- if (rss_hf & ETH_RSS_IPV4)
+ if (rss_hf & RTE_ETH_RSS_IPV4)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV4;
- if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV4;
- if (rss_hf & ETH_RSS_IPV6)
+ if (rss_hf & RTE_ETH_RSS_IPV6)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_IPV6;
- if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ if (rss_hf & RTE_ETH_RSS_NONFRAG_IPV6_TCP)
dev_rss_conf->hashType |= VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
return VMXNET3_SUCCESS;
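For reference, an application-side configuration that exercises the hash types mapped above could look like the following sketch, using the renamed RTE_ETH_RSS_* macros (queue setup and the rte_eth_dev_configure() call are omitted):

    #include <rte_ethdev.h>

    /* Sketch: request IPv4/IPv6 and TCP RSS with the renamed macros. */
    static const struct rte_eth_conf port_conf_sketch = {
        .rxmode = {
            .mq_mode = RTE_ETH_MQ_RX_RSS,
        },
        .rx_adv_conf = {
            .rss_conf = {
                .rss_key = NULL,    /* let the PMD use its default key */
                .rss_hf = RTE_ETH_RSS_IPV4 |
                          RTE_ETH_RSS_NONFRAG_IPV4_TCP |
                          RTE_ETH_RSS_IPV6 |
                          RTE_ETH_RSS_NONFRAG_IPV6_TCP,
            },
        },
    };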