diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c
index 1d0f09d258..5c31121839 100644
--- a/drivers/net/fm10k/fm10k_rxtx.c
+++ b/drivers/net/fm10k/fm10k_rxtx.c
@@ -134,7 +134,7 @@ fm10k_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * So, always PKT_RX_VLAN flag is set and vlan_tci
 		 * is valid for each RX packet's mbuf.
 		 */
-		mbuf->ol_flags |= PKT_RX_VLAN;
+		mbuf->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		mbuf->vlan_tci = desc.w.vlan;
 		/**
 		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -295,7 +295,7 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		 * So, always PKT_RX_VLAN flag is set and vlan_tci
 		 * is valid for each RX packet's mbuf.
 		 */
-		first_seg->ol_flags |= PKT_RX_VLAN;
+		first_seg->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 		first_seg->vlan_tci = desc.w.vlan;
 		/**
 		 * mbuf->vlan_tci_outer is an idle field in fm10k driver,
@@ -366,6 +366,33 @@ fm10k_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return nb_rcv;
 }
 
+uint32_t
+fm10k_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+#define FM10K_RXQ_SCAN_INTERVAL 4
+	volatile union fm10k_rx_desc *rxdp;
+	struct fm10k_rx_queue *rxq;
+	uint16_t desc = 0;
+
+	rxq = dev->data->rx_queues[rx_queue_id];
+	rxdp = &rxq->hw_ring[rxq->next_dd];
+	while ((desc < rxq->nb_desc) &&
+		rxdp->w.status & rte_cpu_to_le_16(FM10K_RXD_STATUS_DD)) {
+		/**
+		 * Check the DD bit of only one rx descriptor in each
+		 * group of 4, to avoid polling the status flags too
+		 * frequently and degrading performance.
+		 */
+		desc += FM10K_RXQ_SCAN_INTERVAL;
+		rxdp += FM10K_RXQ_SCAN_INTERVAL;
+		if (rxq->next_dd + desc >= rxq->nb_desc)
+			rxdp = &rxq->hw_ring[rxq->next_dd + desc -
+					     rxq->nb_desc];
+	}
+
+	return desc;
+}
+
 int
 fm10k_dev_rx_descriptor_done(void *rx_queue, uint16_t offset)
 {
@@ -592,8 +619,9 @@ static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
 			rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
 
 	if (mb->ol_flags & PKT_TX_TCP_SEG) {
-		hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
-			mb->l3_len + mb->l4_len;
+		hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
+		hdrlen += (mb->ol_flags & PKT_TX_TUNNEL_MASK) ?
+			  mb->outer_l2_len + mb->outer_l3_len : 0;
 		if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
 			hdrlen += sizeof(struct fm10k_ftag);
 
@@ -671,25 +699,25 @@
 
 		if ((m->ol_flags & PKT_TX_TCP_SEG) &&
 		    (m->tso_segsz < FM10K_TSO_MINMSS)) {
-			rte_errno = -EINVAL;
+			rte_errno = EINVAL;
 			return i;
 		}
 
 		if (m->ol_flags & FM10K_TX_OFFLOAD_NOTSUP_MASK) {
-			rte_errno = -ENOTSUP;
+			rte_errno = ENOTSUP;
			return i;
 		}
 
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 #endif
 		ret = rte_net_intel_cksum_prepare(m);
 		if (ret != 0) {
-			rte_errno = ret;
+			rte_errno = -ret;
 			return i;
 		}
 	}
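
Background for the first two hunks: in the mbuf API of this DPDK era, PKT_RX_VLAN only
promises that vlan_tci was populated, while PKT_RX_VLAN_STRIPPED additionally tells the
application that the tag has been removed from the packet data, which is what the fm10k
hardware does here. A minimal consumer-side sketch, assuming a port and queue already
configured elsewhere (handle_rx_burst and the burst size are illustrative, not part of
the patch):

#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: how an application can interpret the RX VLAN flags that the
 * driver sets in the hunks above. Port/queue setup is assumed elsewhere.
 */
static void
handle_rx_burst(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, 32);

	for (uint16_t i = 0; i < nb; i++) {
		struct rte_mbuf *m = pkts[i];

		if (m->ol_flags & PKT_RX_VLAN_STRIPPED) {
			/* Tag already removed from packet data; the VLAN ID
			 * lives only in the mbuf metadata. */
			uint16_t vid = m->vlan_tci & 0x0fff;
			(void)vid; /* classify/forward by VLAN here */
		} else if (m->ol_flags & PKT_RX_VLAN) {
			/* vlan_tci is valid but the tag may still be
			 * present in the packet data. */
		}
		rte_pktmbuf_free(m);
	}
}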
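
The new fm10k_dev_rx_queue_count() estimates ring occupancy by probing the DD
(descriptor done) bit of one descriptor per group of FM10K_RXQ_SCAN_INTERVAL and
wrapping at the end of the ring, so the result is a multiple of the interval rather
than an exact count. A self-contained sketch of the same stride-scan idea over a mock
ring (mock_desc, ring_count_done and the constants are invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define SCAN_INTERVAL 4		/* mirrors FM10K_RXQ_SCAN_INTERVAL */
#define RING_SIZE     16
#define STATUS_DD     0x1	/* stand-in for FM10K_RXD_STATUS_DD */

struct mock_desc { uint16_t status; };

/* Count done descriptors by probing one descriptor per group of 4,
 * starting at next_dd and wrapping around the ring, like the stride
 * logic in the hunk above. The result is a multiple of the interval,
 * i.e. an estimate, not an exact count.
 */
static uint16_t
ring_count_done(const struct mock_desc *ring, uint16_t ring_size,
		uint16_t next_dd)
{
	uint16_t count = 0;
	uint16_t idx = next_dd;

	while (count < ring_size && (ring[idx].status & STATUS_DD)) {
		count += SCAN_INTERVAL;
		idx = (uint16_t)((idx + SCAN_INTERVAL) % ring_size);
	}
	return count;
}

int
main(void)
{
	struct mock_desc ring[RING_SIZE] = {0};

	/* Pretend hardware has completed descriptors 5..12. */
	for (int i = 5; i <= 12; i++)
		ring[i].status = STATUS_DD;

	/* Probes 5 and 9 see DD, 13 does not: prints 8. */
	printf("done (estimate): %u\n", ring_count_done(ring, RING_SIZE, 5));
	return 0;
}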
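
In tx_xmit_pkt(), the outer_l2_len/outer_l3_len mbuf fields are only meaningful when a
tunnel Tx offload is requested, so the fixed code adds them to the TSO header length
only when PKT_TX_TUNNEL_MASK matches. A toy illustration of the corrected arithmetic
(tso_hdrlen and the byte counts are made up for the example):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: recomputes the TSO header length the way the
 * fixed hunk does. In DPDK the field values come from struct rte_mbuf.
 */
static uint32_t
tso_hdrlen(uint32_t l2, uint32_t l3, uint32_t l4,
	   uint32_t outer_l2, uint32_t outer_l3, bool tunneled)
{
	uint32_t hdrlen = l2 + l3 + l4;

	/* Outer headers count only for tunneled packets; for plain TCP
	 * the outer_* fields may be stale and must be ignored. */
	if (tunneled)
		hdrlen += outer_l2 + outer_l3;
	return hdrlen;
}

int
main(void)
{
	/* Plain TCP over IPv4: 14 + 20 + 20 = 54 header bytes. */
	printf("plain:    %u\n", tso_hdrlen(14, 20, 20, 14, 20, false));
	/* Tunneled: the outer 14 + 20 bytes are counted as well. */
	printf("tunneled: %u\n", tso_hdrlen(14, 20, 20, 14, 20, true));
	return 0;
}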
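
The final hunk (the Tx prepare path) restores the rte_errno convention: rte_errno holds
a positive errno value, whereas helpers such as rte_validate_tx_offload() and
rte_net_intel_cksum_prepare() return negative errno codes, hence the negation. A
caller-side sketch under that convention (send_burst is hypothetical; port/queue setup
is assumed elsewhere):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <rte_errno.h>
#include <rte_ethdev.h>

/* Sketch: a Tx path relying on the convention fixed above; tx_prepare
 * returns the count of packets that passed checks and leaves a
 * positive errno in rte_errno for the first failing packet.
 */
static uint16_t
send_burst(uint16_t port, uint16_t queue,
	   struct rte_mbuf **pkts, uint16_t n)
{
	uint16_t ok = rte_eth_tx_prepare(port, queue, pkts, n);

	if (ok != n)
		/* rte_errno is positive (EINVAL, ENOTSUP, ...), so it
		 * can be fed straight to strerror(). */
		fprintf(stderr, "tx_prepare failed at %u: %s\n",
			ok, strerror(rte_errno));

	return rte_eth_tx_burst(port, queue, pkts, ok);
}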