X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_rxtx.c;h=3899907d6d0f424c659ec3e221c5255bdd14347c;hb=78156d38e112b33032eedfada65b0df8b047bc31;hp=5189ee6357ae6ffd91e07537c8d89cf8425d57c1;hpb=cd4e7b3250c2c93bb33aa7b01e7165cbfd78021d;p=dpdk.git

diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 5189ee6357..3899907d6d 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -4,7 +4,7 @@
  */
 
 #include <rte_mbuf.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 #include <rte_prefetch.h>
 
@@ -42,9 +42,9 @@ enic_dummy_recv_pkts(__rte_unused void *rx_queue,
 	return 0;
 }
 
-uint16_t
-enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	       uint16_t nb_pkts)
+static inline uint16_t
+enic_recv_pkts_common(void *rx_queue, struct rte_mbuf **rx_pkts,
+		      uint16_t nb_pkts, const bool use_64b_desc)
 {
 	struct vnic_rq *sop_rq = rx_queue;
 	struct vnic_rq *data_rq;
@@ -62,10 +62,15 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	uint16_t seg_length;
 	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
 	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+	const int desc_size = use_64b_desc ?
+		sizeof(struct cq_enet_rq_desc_64) :
+		sizeof(struct cq_enet_rq_desc);
 
+	RTE_BUILD_BUG_ON(sizeof(struct cq_enet_rq_desc_64) != 64);
 	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
 	cq_idx = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
-	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+	cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
+		  (uintptr_t)cq_idx * desc_size);
 	color = cq->last_color;
 
 	data_rq = &enic->rq[sop_rq->data_queue_idx];
@@ -78,15 +83,26 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		struct cq_desc cqd;
 		uint8_t packet_error;
 		uint16_t ciflags;
+		uint8_t tc;
 
 		max_rx--;
 
+		tc = *(volatile uint8_t *)((uintptr_t)cqd_ptr + desc_size - 1);
 		/* Check for pkts available */
-		if ((cqd_ptr->type_color & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
+		if ((tc & CQ_DESC_COLOR_MASK_NOSHIFT) == color)
 			break;
 
 		/* Get the cq descriptor and extract rq info from it */
 		cqd = *cqd_ptr;
+		/*
+		 * The first 16B of a 64B descriptor is identical to the
+		 * 16B descriptor, except for type_color. Copy type_color
+		 * from the 64B descriptor into the 16B descriptor's
+		 * field, so the code below can assume the 16B
+		 * descriptor format.
+		 */
+		if (use_64b_desc)
+			cqd.type_color = tc;
 		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
 		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
 
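The hunk above is the core of the 64B completion-queue support: in the VIC completion-ring convention, the last byte of every entry (type_color) carries a color bit that the NIC flips on each pass over the ring, so the driver learns that an entry is fresh by comparing that bit against the color it saw on the previous pass, before it trusts any other byte of the entry. Reading the byte at offset desc_size - 1 lets the same loop serve 16B and 64B entries. Below is a minimal, self-contained model of that polling scheme; poll_cq(), COLOR_BIT, NUM_ENTRIES and the toy cq_entry_16 layout are invented for the illustration and are not driver API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define COLOR_BIT	0x80	/* stands in for CQ_DESC_COLOR_MASK_NOSHIFT */
#define NUM_ENTRIES	4	/* toy ring size */

/* Simplified 16B completion entry; type_color is always the last byte. */
struct cq_entry_16 {
	uint8_t payload[15];
	uint8_t type_color;
};

/*
 * Count entries the "device" has completed so far. Taking desc_size as
 * a parameter lets one loop walk 16B or 64B rings, the same way
 * enic_recv_pkts_common() does: the color byte always sits at offset
 * desc_size - 1 of the current entry.
 */
static unsigned int poll_cq(const uint8_t *ring, size_t desc_size,
			    unsigned int to_clean, uint8_t color)
{
	unsigned int n = 0;

	while (n < NUM_ENTRIES) {
		const uint8_t *entry = ring + (size_t)to_clean * desc_size;
		uint8_t tc = entry[desc_size - 1];

		/* Same color as the previous pass: not yet written. */
		if ((tc & COLOR_BIT) == color)
			break;
		n++;
		to_clean = (to_clean + 1) % NUM_ENTRIES;
	}
	return n;
}

int main(void)
{
	struct cq_entry_16 ring[NUM_ENTRIES];

	memset(ring, 0, sizeof(ring));	/* color bit clear everywhere */
	ring[0].type_color = COLOR_BIT;	/* "device" completes entries 0-1 */
	ring[1].type_color = COLOR_BIT;

	printf("completed: %u\n",
	       poll_cq((const uint8_t *)ring, sizeof(ring[0]), 0, 0));
	return 0;
}

In the driver the corresponding read is volatile: until the color flips, the rest of the entry may still be in flight from the NIC, which is why the patch copies the full descriptor only after the check succeeds.
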
@@ -109,7 +125,8 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_idx++;
 
 		/* Prefetch next mbuf & desc while processing current one */
-		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
+		cqd_ptr = (struct cq_desc *)((uintptr_t)(cq->ring.descs) +
+			  (uintptr_t)cq_idx * desc_size);
 		rte_enic_prefetch(cqd_ptr);
 
 		ciflags = enic_cq_rx_desc_ciflags(
@@ -215,6 +232,18 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rx;
 }
 
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, false);
+}
+
+uint16_t
+enic_recv_pkts_64(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return enic_recv_pkts_common(rx_queue, rx_pkts, nb_pkts, true);
+}
+
 uint16_t
 enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			 uint16_t nb_pkts)
@@ -329,7 +358,8 @@ enic_noscatter_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return rx - rx_pkts;
 }
 
-static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
+static inline void enic_free_wq_bufs(struct vnic_wq *wq,
+				     uint16_t completed_index)
 {
 	struct rte_mbuf *buf;
 	struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS];
@@ -371,7 +401,7 @@ static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index)
 unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq)
 {
-	u16 completed_index;
+	uint16_t completed_index;
 
 	completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff;
@@ -393,11 +423,22 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	for (i = 0; i != nb_pkts; i++) {
 		m = tx_pkts[i];
-		if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
-			rte_errno = EINVAL;
-			return i;
-		}
 		ol_flags = m->ol_flags;
+		if (!(ol_flags & PKT_TX_TCP_SEG)) {
+			if (unlikely(m->pkt_len > ENIC_TX_MAX_PKT_SIZE)) {
+				rte_errno = EINVAL;
+				return i;
+			}
+		} else {
+			uint16_t header_len;
+
+			header_len = m->l2_len + m->l3_len + m->l4_len;
+			if (m->tso_segsz + header_len > ENIC_TX_MAX_PKT_SIZE) {
+				rte_errno = EINVAL;
+				return i;
+			}
+		}
+
 		if (ol_flags & wq->tx_offload_notsup_mask) {
 			rte_errno = ENOTSUP;
 			return i;
 		}
@@ -405,13 +446,13 @@ uint16_t enic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
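
A note on the two new Rx entry points: enic_recv_pkts() and enic_recv_pkts_64() are thin wrappers that pass a compile-time constant into the always-inlined enic_recv_pkts_common(), so an optimizing compiler can fold use_64b_desc and emit two specialized burst functions instead of one that branches per descriptor. The sketch below shows the same pattern in isolation; sum_common(), sum_16() and sum_64() are made-up names standing in for the driver functions.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same shape as the patch's Rx entry points: a common routine takes a
 * const bool, and two thin public wrappers pass a literal. After
 * inlining, the flag becomes a constant and the 16B/64B choice costs
 * nothing at run time.
 */
static inline unsigned int sum_common(const uint8_t *base, unsigned int n,
				      const bool wide)
{
	/* Folds to a constant stride once 'wide' is a literal. */
	const unsigned int stride = wide ? 64 : 16;
	unsigned int i, total = 0;

	for (i = 0; i < n; i++)
		total += base[(size_t)i * stride];
	return total;
}

static unsigned int sum_16(const uint8_t *base, unsigned int n)
{
	return sum_common(base, n, false);
}

static unsigned int sum_64(const uint8_t *base, unsigned int n)
{
	return sum_common(base, n, true);
}

int main(void)
{
	uint8_t buf[256] = { 0 };

	buf[0] = 1;
	buf[16] = 2;
	buf[64] = 3;
	printf("16B stride: %u, 64B stride: %u\n",
	       sum_16(buf, 4), sum_64(buf, 4));
	return 0;
}

With optimization enabled, the stride selection in each wrapper's inlined copy should compile down to a constant, which is the point of routing both burst functions through one common body.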
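
The reworked length check in enic_prep_pkts() reflects what actually reaches the wire under TSO: the mbuf's pkt_len may legitimately exceed the hardware's per-frame limit, because the NIC resegments the payload into chunks of at most tso_segsz bytes and prefixes each chunk with a fresh copy of the L2/L3/L4 headers. The largest transmitted frame is therefore header_len + tso_segsz, which is what the new code compares against ENIC_TX_MAX_PKT_SIZE. A standalone illustration follows; MAX_WIRE_PKT_SIZE is an assumed stand-in for the real limit in enic.h, and tso_frame_fits() is invented for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed stand-in for ENIC_TX_MAX_PKT_SIZE from enic.h. */
#define MAX_WIRE_PKT_SIZE	9000

/*
 * Mirrors the new check in enic_prep_pkts(): under TSO the NIC cuts
 * the payload into chunks of at most tso_segsz bytes and prefixes each
 * chunk with a copy of the L2/L3/L4 headers, so the largest frame on
 * the wire is header_len + tso_segsz, not pkt_len.
 */
static bool tso_frame_fits(uint16_t l2_len, uint16_t l3_len,
			   uint16_t l4_len, uint16_t tso_segsz)
{
	uint32_t header_len = (uint32_t)l2_len + l3_len + l4_len;

	return header_len + tso_segsz <= MAX_WIRE_PKT_SIZE;
}

int main(void)
{
	/* A 64KB TCP send: Ethernet + IPv4 + TCP headers, MSS 1460. */
	uint32_t pkt_len = 65535;
	uint16_t l2 = 14, l3 = 20, l4 = 20, mss = 1460;

	/* The mbuf itself is far over the wire limit... */
	printf("pkt_len %u exceeds limit: %s\n", pkt_len,
	       pkt_len > MAX_WIRE_PKT_SIZE ? "yes" : "no");
	/* ...but every frame the NIC emits is only headers + MSS. */
	printf("largest wire frame %u, fits: %s\n",
	       (unsigned int)(l2 + l3 + l4 + mss),
	       tso_frame_fits(l2, l3, l4, mss) ? "yes" : "no");
	return 0;
}

Non-TSO packets keep the direct pkt_len comparison, and both branches stop at the offending packet with rte_errno set to EINVAL, matching the rte_eth_tx_prepare() convention of returning the number of leading packets that can safely be transmitted.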