diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 4eca3d6f10..c9bb04a0e3 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  * BSD LICENSE
  *
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
 
 #include <rte_ethdev.h>
 #include <rte_common.h>
+#include <rte_net.h>
 #include "fm10k.h"
 #include "base/fm10k_type.h"
@@ -65,6 +66,18 @@ static inline void dump_rxd(union fm10k_rx_desc *rxd)
 }
 #endif
 
+#define FM10K_TX_OFFLOAD_MASK (	\
+		PKT_TX_VLAN_PKT |	\
+		PKT_TX_IP_CKSUM |	\
+		PKT_TX_L4_MASK |	\
+		PKT_TX_TCP_SEG)
+
+#define FM10K_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ FM10K_TX_OFFLOAD_MASK)
+
+/* @note: When this function is changed, a corresponding change must be
+ * made to fm10k_dev_supported_ptypes_get().
+ */
 static inline void
 rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
 {
@@ -98,17 +111,15 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
 			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) ==
 			(FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)))
 		m->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	else
+		m->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
 
 	if (unlikely((d->d.staterr &
 			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) ==
 			(FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)))
 		m->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
-	if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO))
-		m->ol_flags |= PKT_RX_HBUF_OVERFLOW;
-
-	if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
-		m->ol_flags |= PKT_RX_RECIP_ERR;
+	else
+		m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
 }
 
 uint16_t
@@ -127,10 +138,10 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_pkts = RTE_MIN(nb_pkts, q->alloc_thresh);
 
 	for (count = 0; count < nb_pkts; ++count) {
+		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+			break;
 		mbuf = q->sw_ring[next_dd];
 		desc = q->hw_ring[next_dd];
-		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
-			break;
 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
 		dump_rxd(&desc);
 #endif
@@ -152,6 +163,12 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		mbuf->ol_flags |= PKT_RX_VLAN_PKT;
 		mbuf->vlan_tci = desc.w.vlan;
+		/**
+		 * mbuf->vlan_tci_outer is an unused field in the fm10k driver,
+		 * so it is reused here to store the sglort value.
+		 */
+		if (q->rx_ftag_en)
+			mbuf->vlan_tci_outer = rte_le_to_cpu_16(desc.w.sglort);
 
 		rx_pkts[count] = mbuf;
 		if (++next_dd == q->nb_desc) {
@@ -235,10 +252,10 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	nb_seg = RTE_MIN(nb_pkts, q->alloc_thresh);
 
 	for (count = 0; count < nb_seg; count++) {
+		if (!(q->hw_ring[next_dd].d.staterr & FM10K_RXD_STATUS_DD))
+			break;
 		mbuf = q->sw_ring[next_dd];
 		desc = q->hw_ring[next_dd];
-		if (!(desc.d.staterr & FM10K_RXD_STATUS_DD))
-			break;
 #ifdef RTE_LIBRTE_FM10K_DEBUG_RX
 		dump_rxd(&desc);
 #endif
@@ -307,6 +324,13 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 */
 		first_seg->ol_flags |= PKT_RX_VLAN_PKT;
 		first_seg->vlan_tci = desc.w.vlan;
+		/**
+		 * mbuf->vlan_tci_outer is an unused field in the fm10k driver,
+		 * so it is reused here to store the sglort value.
+		 */
+		if (q->rx_ftag_en)
+			first_seg->vlan_tci_outer =
+				rte_le_to_cpu_16(desc.w.sglort);
 
 		/* Prefetch data of first segment, if configured to do so. */
 		rte_packet_prefetch((char *)first_seg->buf_addr +
@@ -394,6 +418,51 @@ fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 	return ret;
 }
 
+/*
+ * Free multiple Tx mbufs at a time if they belong to the same mempool
+ *
+ * @txep: pointer into the software ring at the first entry to free
+ * @num: number of descriptors to free
+ *
+ */
+static inline void tx_free_bulk_mbuf(struct rte_mbuf **txep, int num)
+{
+	struct rte_mbuf *m, *free[RTE_FM10K_TX_MAX_FREE_BUF_SZ];
+	int i;
+	int nb_free = 0;
+
+	if (unlikely(num == 0))
+		return;
+
+	m = rte_pktmbuf_prefree_seg(txep[0]);
+	if (likely(m != NULL)) {
+		free[0] = m;
+		nb_free = 1;
+		for (i = 1; i < num; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i]);
+			if (likely(m != NULL)) {
+				if (likely(m->pool == free[0]->pool))
+					free[nb_free++] = m;
+				else {
+					rte_mempool_put_bulk(free[0]->pool,
+							(void **)free, nb_free);
+					free[0] = m;
+					nb_free = 1;
+				}
+			}
+			txep[i] = NULL;
+		}
+		rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free);
+	} else {
+		for (i = 1; i < num; i++) {
+			m = rte_pktmbuf_prefree_seg(txep[i]);
+			if (m != NULL)
+				rte_mempool_put(m->pool, m);
+			txep[i] = NULL;
+		}
+	}
+}
+
 static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
 {
 	uint16_t next_rs, count = 0;
@@ -410,11 +479,7 @@ static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
 	 * including nb_desc */
 	if (q->last_free > next_rs) {
 		count = q->nb_desc - q->last_free;
-		while (q->last_free < q->nb_desc) {
-			rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
-			q->sw_ring[q->last_free] = NULL;
-			++q->last_free;
-		}
+		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
 		q->last_free = 0;
 	}
 
@@ -422,10 +487,10 @@ static inline void tx_free_descriptors(struct fm10k_tx_queue *q)
 	q->nb_free += count + (next_rs + 1 - q->last_free);
 
 	/* free buffers from last_free, up to and including next_rs */
-	while (q->last_free <= next_rs) {
-		rte_pktmbuf_free_seg(q->sw_ring[q->last_free]);
-		q->sw_ring[q->last_free] = NULL;
-		++q->last_free;
+	if (q->last_free <= next_rs) {
+		count = next_rs - q->last_free + 1;
+		tx_free_bulk_mbuf(&q->sw_ring[q->last_free], count);
+		q->last_free += count;
 	}
 
 	if (q->last_free == q->nb_desc)
@@ -457,6 +522,8 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 	q->nb_free -= mb->nb_segs;
 
 	q->hw_ring[q->next_free].flags = 0;
+	if (q->tx_ftag_en)
+		q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_FTAG;
 	/* set checksum flags on first descriptor of packet. SCTP checksum
 	 * offload is not supported, but we do not explicitly check for this
 	 * case in favor of greatly simplified processing. */
@@ -540,3 +607,41 @@ fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 	return count;
 }
+
+uint16_t
+fm10k_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	int i, ret;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+
+		if ((m->ol_flags & PKT_TX_TCP_SEG) &&
+				(m->tso_segsz < FM10K_TSO_MINMSS)) {
+			rte_errno = EINVAL;
+			return i;
+		}
+
+		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = -ret;
+			return i;
+		}
+	}
+
+	return i;
+}
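
fm10k_prep_pkts() above is the driver's tx_pkt_prepare callback, reached
through the generic rte_eth_tx_prepare() API introduced in DPDK 17.02.
Below is a minimal usage sketch of how an application would pair it with
rte_eth_tx_burst(); it is not part of this patch: send_burst() and its
variable names are illustrative, and it assumes a configured, started
port (note that port IDs were still uint8_t in the 17.02 release).

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_errno.h>

/* Sketch: run the driver's Tx preparation callback (fm10k_prep_pkts on
 * an fm10k port) before transmitting, and transmit only the packets
 * that passed preparation. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Validates offload flags and fixes up checksum pseudo-headers;
	 * stops at the first bad packet and sets rte_errno. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep < nb_pkts)
		printf("tx_prepare stopped at packet %u: %s\n",
		       nb_prep, rte_strerror(rte_errno));

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

With the rte_errno sign convention fixed as above, the application can
distinguish an unsupported offload request (ENOTSUP) from a malformed
one such as a too-small TSO MSS (EINVAL) and decide whether to drop or
repair the offending mbuf.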