diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c
index 4a987e3..5d0d3cd 100644
--- a/drivers/net/e1000/igb_rxtx.c
+++ b/drivers/net/e1000/igb_rxtx.c
@@ -1,7 +1,7 @@
 /*-
  *   BSD LICENSE
  *
- *   Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
  *   All rights reserved.
  *
  *   Redistribution and use in source and binary forms, with or without
@@ -56,7 +56,6 @@
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -66,6 +65,7 @@
 #include <rte_udp.h>
 #include <rte_tcp.h>
 #include <rte_sctp.h>
+#include <rte_net.h>
 #include <rte_string_fns.h>
 
 #include "e1000_logs.h"
                PKT_TX_L4_MASK |                 \
                PKT_TX_TCP_SEG)
 
-static inline struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-       struct rte_mbuf *m;
-
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, 0);
-       return m;
-}
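+/* Tx offload flags that are not supported by the igb Tx path */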
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+               (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
 
 /**
  * Structure associated with each descriptor of the RX ring of a RX queue.
@@ -625,6 +618,52 @@ eth_igb_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }
 
+/*********************************************************************
+ *
+ *  TX prep functions
+ *
+ **********************************************************************/
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+               uint16_t nb_pkts)
+{
+       int i, ret;
+       struct rte_mbuf *m;
+
+       for (i = 0; i < nb_pkts; i++) {
+               m = tx_pkts[i];
+
+               /* Check some limitations for TSO in hardware */
+               if (m->ol_flags & PKT_TX_TCP_SEG)
+                       if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+                                       (m->l2_len + m->l3_len + m->l4_len >
+                                       IGB_TSO_MAX_HDRLEN)) {
+                               rte_errno = EINVAL;
+                               return i;
+                       }
+
+               if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+                       rte_errno = ENOTSUP;
+                       return i;
+               }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+               ret = rte_validate_tx_offload(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+#endif
+               ret = rte_net_intel_cksum_prepare(m);
+               if (ret != 0) {
+                       rte_errno = -ret;
+                       return i;
+               }
+       }
+
+       return i;
+}
+
 /*********************************************************************
  *
  *  RX functions
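
The prepare callback added above is what the generic rte_eth_tx_prepare() API dispatches to: applications are expected to run it on a burst before rte_eth_tx_burst(), so that unsupported offload requests are rejected and the pseudo-header checksums required by the hardware are filled in. A minimal sketch of the intended call pattern, assuming <rte_ethdev.h> and <rte_errno.h> are included and that port_id, queue_id, pkts and nb_pkts are placeholders for an already configured port and a burst built by the caller:

    /* Validate offloads and fix up headers before handing packets to the PMD */
    uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

    if (nb_prep < nb_pkts)
        /* pkts[nb_prep] was rejected; rte_errno explains why */
        printf("Tx prepare stopped at packet %u: %s\n",
               nb_prep, rte_strerror(rte_errno));

    /* Only the packets that passed preparation are handed to the hardware */
    uint16_t nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
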
@@ -739,7 +778,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status)
        uint64_t pkt_flags;
 
        /* Check if VLAN present */
-       pkt_flags = (rx_status & E1000_RXD_STAT_VP) ?  PKT_RX_VLAN_PKT : 0;
+       pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
+               PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0);
 
 #if defined(RTE_LIBRTE_IEEE1588)
        if (rx_status & E1000_RXD_STAT_TMST)
@@ -757,7 +797,9 @@ rx_desc_error_to_pkt_flags(uint32_t rx_status)
         */
 
        static uint64_t error_to_pkt_flags_map[4] = {
-               0,  PKT_RX_L4_CKSUM_BAD, PKT_RX_IP_CKSUM_BAD,
+               PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+               PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+               PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
                PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
        };
        return error_to_pkt_flags_map[(rx_status >>
@@ -838,7 +880,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                           (unsigned) rx_id, (unsigned) staterr,
                           (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (nmb == NULL) {
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1021,7 +1063,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                           (unsigned) rx_id, (unsigned) staterr,
                           (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length));
 
-               nmb = rte_rxmbuf_alloc(rxq->mb_pool);
+               nmb = rte_mbuf_raw_alloc(rxq->mb_pool);
                if (nmb == NULL) {
                        PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
                                   "queue_id=%u", (unsigned) rxq->port_id,
@@ -1372,6 +1414,7 @@ eth_igb_tx_queue_setup(struct rte_eth_dev *dev,
 
        igb_reset_tx_queue(txq, dev);
        dev->tx_pkt_burst = eth_igb_xmit_pkts;
+       dev->tx_pkt_prepare = &eth_igb_prep_pkts;
        dev->data->tx_queues[queue_idx] = txq;
 
        return 0;
@@ -1537,7 +1580,7 @@ eth_igb_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
                                desc - rxq->nb_rx_desc]);
        }
 
-       return 0;
+       return desc;
 }
 
 int
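
With eth_igb_rx_queue_count() now returning the number of used descriptors instead of always 0, callers of the generic rte_eth_rx_queue_count() get meaningful data from igb ports. An illustrative fragment, with port_id, queue_id and nb_rx_desc standing in for an already configured queue and its ring size:

    /* Descriptors the NIC has filled but the application has not yet read */
    uint32_t used = rte_eth_rx_queue_count(port_id, queue_id);

    if (used > (nb_rx_desc * 3) / 4)
        printf("RX queue %u is filling up: %u of %u descriptors in use\n",
               (unsigned) queue_id, (unsigned) used, (unsigned) nb_rx_desc);
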
@@ -1957,7 +2000,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq)
        /* Initialize software ring entries. */
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                volatile union e1000_adv_rx_desc *rxd;
-               struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool);
+               struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
 
                if (mbuf == NULL) {
                        PMD_INIT_LOG(ERR, "RX mbuf alloc failed "