#define rte_packet_prefetch(p) do {} while (0)
#endif
/*
 * NOTE(review): leftover unified-diff '-' markers stripped to restore valid C.
 * The embedded patch deletes this wrapper; the receive loop now calls
 * rte_mbuf_raw_alloc() directly (see the allocation site below).
 */
/**
 * Allocate a raw (uninitialized) mbuf from the given mempool.
 *
 * @param mp  mempool to allocate from
 * @return    new mbuf on success, NULL if the pool is exhausted
 */
static inline struct rte_mbuf *
rte_rxmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;

	/* Raw alloc does not reset pkt fields; the sanity check compiles
	 * to a no-op in non-debug builds. */
	m = __rte_mbuf_raw_alloc(mp);
	__rte_mbuf_sanity_check_raw(m, 0);
	return m;
}
static inline uint16_t
enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
{
return le32_to_cpu(cqrd->rss_hash);
}
-static inline uint8_t
-enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
-{
- return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
- CQ_ENET_RQ_DESC_FLAGS_FCS_OK);
-}
-
static inline uint16_t
enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
{
CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
}
-static inline uint64_t
-enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
+static inline uint8_t
+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out)
{
struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
uint16_t bwflags;
+ int ret = 0;
uint64_t pkt_err_flags = 0;
bwflags = enic_cq_rx_desc_bwflags(cqrd);
-
- /* Check for packet error. Can't be more specific than MAC error */
- if (enic_cq_rx_desc_packet_error(bwflags)) {
- pkt_err_flags |= PKT_RX_MAC_ERR;
+ if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) {
+ pkt_err_flags = PKT_RX_MAC_ERR;
+ ret = 1;
}
-
- /* Check for bad FCS. MAC error isn't quite, but no other choice */
- if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
- pkt_err_flags |= PKT_RX_MAC_ERR;
- }
- return pkt_err_flags;
+ *pkt_err_flags_out = pkt_err_flags;
+ return ret;
}
/*
/* VLAN stripping */
if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
- pkt_flags |= PKT_RX_VLAN_PKT;
+ pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
} else {
mbuf->vlan_tci = 0;
enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	/* Advance ring index i0 by i1 modulo the ring size.
	 * NOTE(review): diff markers stripped; post-patch form keeps the
	 * RTE_ASSERT spelling (the bare ASSERT lines were the pre-patch
	 * side of the hunk). Both operands must already be in range. */
	uint32_t d = i0 + i1;

	RTE_ASSERT(i0 < n_descriptors);
	RTE_ASSERT(i1 < n_descriptors);
	/* Conditional subtract instead of '%': cheaper, and correct
	 * because d < 2 * n_descriptors given the asserts above. */
	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}
nb_hold = rq->rx_nb_hold; /* mbufs held by software */
while (nb_rx < nb_pkts) {
- uint16_t rx_pkt_len;
volatile struct rq_enet_desc *rqd_ptr;
dma_addr_t dma_addr;
struct cq_desc cqd;
uint64_t ol_err_flags;
+ uint8_t packet_error;
/* Check for pkts available */
color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
/* allocate a new mbuf */
- nmb = rte_rxmbuf_alloc(rq->mp);
+ nmb = rte_mbuf_raw_alloc(rq->mp);
if (nmb == NULL) {
dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
enic->port_id, (unsigned)rq->index);
break;
}
- /* Check for FCS or packet errors */
- ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
- if (ol_err_flags == 0)
- rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
- else
- rx_pkt_len = 0;
+ /* A packet error means descriptor and data are untrusted */
+ packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
/* Get the mbuf to return and replace with one just allocated */
rxmb = rq->mbuf_ring[rx_id];
+ rx_id);
/* Push descriptor for newly allocated mbuf */
- dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
+ dma_addr = (dma_addr_t)(nmb->buf_physaddr
+ + RTE_PKTMBUF_HEADROOM);
rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
- rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
+ rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
+ - RTE_PKTMBUF_HEADROOM);
/* Fill in the rest of the mbuf */
rxmb->data_off = RTE_PKTMBUF_HEADROOM;
rxmb->nb_segs = 1;
rxmb->next = NULL;
- rxmb->pkt_len = rx_pkt_len;
- rxmb->data_len = rx_pkt_len;
rxmb->port = enic->port_id;
- rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
- rxmb->ol_flags = ol_err_flags;
- if (!ol_err_flags)
+ if (!packet_error) {
+ rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
+ rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+ } else {
+ rxmb->pkt_len = 0;
+ rxmb->packet_type = 0;
+ rxmb->ol_flags = 0;
+ }
+ rxmb->data_len = rxmb->pkt_len;
/* prefetch mbuf data for caller */
rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,