X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fe1000%2Figb_rxtx.c;h=5d0d3cd6c3b49c2a2b1623e7066c57a03270e1d8;hb=5526b0cbd50c757830b902e513281b835c98a2c1;hp=a47c640a6436b58079b715f9ef7c3982ff5be470;hpb=f03723017a2a5ea421df821eb0ff9a0bfcacff4f;p=dpdk.git

diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index a47c640a64..5d0d3cd6c3 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -65,6 +65,7 @@
 #include
 #include
 #include
+#include <rte_net.h>
 #include
 
 #include "e1000_logs.h"
@@ -78,6 +79,9 @@
 		PKT_TX_L4_MASK |		 \
 		PKT_TX_TCP_SEG)
 
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
  */
@@ -614,6 +618,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	int i, ret;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+
+		/* Check some limitations for TSO in hardware */
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+					(m->l2_len + m->l3_len + m->l4_len >
+					IGB_TSO_MAX_HDRLEN)) {
+				rte_errno = -EINVAL;
+				return i;
+			}
+
+		if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 /*********************************************************************
  *
  *  RX functions
@@ -747,7 +797,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 	 */
 
 	static uint64_t error_to_pkt_flags_map[4] = {
-		0, PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
 		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
 	};
 	return error_to_pkt_flags_map[(rx_status >>
@@ -1362,6 +1414,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 
 	igb_reset_tx_queue(txq, dev);
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
+	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
 
 	return 0;
@@ -1527,7 +1580,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 				desc - rxq->nb_rx_desc]);
 	}
 
-	return 0;
+	return desc;
 }
 
 int
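
Note (not part of the patch): the hunk in eth_igb_tx_queue_setup() registers eth_igb_prep_pkts() as the device's tx_pkt_prepare callback, so an application reaches it through the generic rte_eth_tx_prepare() call added in the same release. The sketch below is a minimal, hedged illustration of that flow; the send_burst() wrapper and its port/queue/packet-array parameters are made-up names for illustration, and on older DPDK releases the port id parameter of rte_eth_tx_prepare() is a uint8_t.

/* Minimal usage sketch, assuming a configured and started port. */
#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

/* Hypothetical helper: prepare offload metadata, then transmit. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* For igb this dispatches to eth_igb_prep_pkts() via
	 * dev->tx_pkt_prepare: it checks the TSO limits and the
	 * supported offload flags and fixes up checksum fields. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id,
					      pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		/* pkts[nb_prep] failed a check; in this driver version
		 * rte_errno carries a negative errno (-EINVAL/-ENOTSUP). */
		printf("tx_prepare stopped at packet %u, rte_errno=%d\n",
		       nb_prep, rte_errno);

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

Running prepare before the burst matters mainly for TSO and checksum offloads: rte_net_intel_cksum_prepare() computes the pseudo-header checksums in software, which Intel NICs expect to find in the packet before hardware offload is applied.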