diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4a987e3cc0..1bb4d85119 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,6 @@
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -66,28 +65,28 @@
 #include <rte_udp.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
+#include <rte_net.h>
 #include <rte_string_fns.h>
 
 #include "e1000_logs.h"
 #include "base/e1000_api.h"
 #include "e1000_ethdev.h"
 
+#ifdef RTE_LIBRTE_IEEE1588
+#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#else
+#define IGB_TX_IEEE1588_TMST 0
+#endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IGB_TX_OFFLOAD_MASK (			\
 		PKT_TX_VLAN_PKT |		\
 		PKT_TX_IP_CKSUM |		\
 		PKT_TX_L4_MASK |		\
-		PKT_TX_TCP_SEG)
-
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-	struct rte_mbuf *m;
+		PKT_TX_TCP_SEG |		\
+		IGB_TX_IEEE1588_TMST)
 
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
-}
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+		(PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -616,7 +615,7 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	/*
 	 * Set the Transmit Descriptor Tail (TDT).
 	 */
-	E1000_PCI_REG_WRITE(txq->tdt_reg_addr, tx_id);
+	E1000_PCI_REG_WRITE_RELAXED(txq->tdt_reg_addr, tx_id);
 	PMD_TX_LOG(DEBUG, "port_id=%u queue_id=%u tx_tail=%u nb_tx=%u",
 		   (unsigned) txq->port_id, (unsigned) txq->queue_id,
 		   (unsigned) tx_id, (unsigned) nb_tx);
@@ -625,6 +624,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+		uint16_t nb_pkts)
+{
+	int i, ret;
+	struct rte_mbuf *m;
+
+	for (i = 0; i < nb_pkts; i++) {
+		m = tx_pkts[i];
+
+		/* Check some limitations for TSO in hardware */
+		if (m->ol_flags & PKT_TX_TCP_SEG)
+			if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+					(m->l2_len + m->l3_len + m->l4_len >
+					IGB_TSO_MAX_HDRLEN)) {
+				rte_errno = -EINVAL;
+				return i;
+			}
+
+		if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+			rte_errno = -ENOTSUP;
+			return i;
+		}
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+		ret = rte_validate_tx_offload(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+#endif
+		ret = rte_net_intel_cksum_prepare(m);
+		if (ret != 0) {
+			rte_errno = ret;
+			return i;
+		}
+	}
+
+	return i;
+}
+
 /*********************************************************************
  *
  *  RX functions
@@ -739,7 +784,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
 	uint64_t pkt_flags;
 
 	/* Check if VLAN present */
-	pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
+	pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+		PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
 
 #if defined(RTE_LIBRTE_IEEE1588)
 	if (rx_status & E1000_RXD_STAT_TMST)
@@ -757,7 +803,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
 	 */
 	static uint64_t error_to_pkt_flags_map[4] = {
-		0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
 		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
 	};
 	return error_to_pkt_flags_map[(rx_status >>
@@ -838,7 +886,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1021,7 +1069,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			   (unsigned) rx_id, (unsigned) staterr,
 			   (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-		nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+		nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
 		if (nmb == NULL) {
 			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
 				   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1372,6 +1420,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 	igb_reset_tx_queue(txq, dev);
 	dev->tx_pkt_burst = eth_igb_xmit_pkts;
+	dev->tx_pkt_prepare = &eth_igb_prep_pkts;
 	dev->data->tx_queues[queue_idx] = txq;
 
 	return 0;
@@ -1520,11 +1569,6 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	struct igb_rx_queue *rxq;
 	uint32_t desc = 0;
 
-	if (rx_queue_id >= dev->data->nb_rx_queues) {
-		PMD_RX_LOG(ERR, "Invalid RX queue id=%d", rx_queue_id);
-		return 0;
-	}
-
 	rxq = dev->data->rx_queues[rx_queue_id];
 	rxdp = &(rxq->rx_ring[rxq->rx_tail]);
@@ -1537,7 +1581,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 			desc - rxq->nb_rx_desc]);
 	}
 
-	return 0;
+	return desc;
 }
 
 int
@@ -1957,7 +2001,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
 	/* Initialize software ring entries. */
 	for (i = 0; i < rxq->nb_rx_desc; i++) {
 		volatile union e1000_adv_rx_desc *rxd;
-		struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
 		if (mbuf == NULL) {
 			PMD_INIT_LOG(ERR, "RX mbuf alloc failed "
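
Usage note: the hunk in eth_igb_tx_queue_setup() wires eth_igb_prep_pkts() into
dev->tx_pkt_prepare, the driver hook behind the generic rte_eth_tx_prepare()
ethdev call introduced alongside this Tx-preparation work. The sketch below is
illustrative only and not part of the patch: it assumes a configured, started
port, uses current DPDK types (16-bit port ids), and send_prepared_burst,
port_id, queue_id, pkts and nb_pkts are hypothetical names.

/*
 * Usage sketch only -- not from this patch.  Runs a burst through
 * tx_prepare before tx_burst and treats a short return as "stop at
 * the first invalid mbuf".
 */
#include <stdio.h>

#include <rte_ethdev.h>
#include <rte_errno.h>
#include <rte_mbuf.h>

static uint16_t
send_prepared_burst(uint16_t port_id, uint16_t queue_id,
		struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/*
	 * rte_eth_tx_prepare() dispatches to the driver's tx_pkt_prepare
	 * callback (eth_igb_prep_pkts() for igb) and stops at the first
	 * mbuf whose offload metadata the hardware cannot handle, leaving
	 * the reason in rte_errno.
	 */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
	if (nb_prep != nb_pkts)
		printf("tx_prepare stopped at mbuf %u (rte_errno=%d)\n",
		       nb_prep, rte_errno);

	/* Transmit only the packets that passed preparation. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}

A short return from rte_eth_tx_prepare() means pkts[nb_prep] failed one of the
checks added above: an offload bit outside IGB_TX_OFFLOAD_MASK (rte_errno set
from -ENOTSUP) or TSO metadata exceeding IGB_TSO_MAX_MSS/IGB_TSO_MAX_HDRLEN
(rte_errno set from -EINVAL).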