#define E1000_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define E1000_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
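+/* Tx segment limits advertised in the Tx descriptor limits of dev_info */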
+#define IGB_TX_MAX_SEG UINT8_MAX
+#define IGB_TX_MAX_MTU_SEG UINT8_MAX
+#define EM_TX_MAX_SEG UINT8_MAX
+#define EM_TX_MAX_MTU_SEG UINT8_MAX
+
/* structure for interrupt relative data */
struct e1000_interrupt {
uint32_t flags;
uint16_t eth_igb_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_igb_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_igb_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
uint16_t eth_em_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t eth_em_prep_pkts(void *txq, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
uint16_t eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
eth_dev->dev_ops = &eth_em_ops;
eth_dev->rx_pkt_burst = (eth_rx_burst_t)&eth_em_recv_pkts;
eth_dev->tx_pkt_burst = (eth_tx_burst_t)&eth_em_xmit_pkts;
+ eth_dev->tx_pkt_prepare = (eth_tx_prep_t)&eth_em_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = EM_TXD_ALIGN,
+ .nb_seg_max = EM_TX_MAX_SEG,
+ .nb_mtu_seg_max = EM_TX_MAX_MTU_SEG,
};
dev_info->speed_capa = ETH_LINK_SPEED_10M_HD | ETH_LINK_SPEED_10M |
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */
+#define E1000_TX_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_VLAN_PKT)
+
+#define E1000_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ E1000_TX_OFFLOAD_MASK)
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
return nb_tx;
}
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
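+/*
+ * Called through rte_eth_tx_prepare() ahead of rte_eth_tx_burst(): reject
+ * mbufs carrying offload flags the em Tx path does not support and fill in
+ * the pseudo-header checksums expected by the hardware checksum offload.
+ * On failure, rte_errno is set and the index of the offending packet is
+ * returned.
+ */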
+uint16_t
+eth_em_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ if (m->ol_flags & E1000_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
/*********************************************************************
*
* RX functions
.nb_max = E1000_MAX_RING_DESC,
.nb_min = E1000_MIN_RING_DESC,
.nb_align = IGB_RXD_ALIGN,
+ .nb_seg_max = IGB_TX_MAX_SEG,
+ .nb_mtu_seg_max = IGB_TX_MAX_MTU_SEG,
};
static const struct eth_dev_ops eth_igb_ops = {
eth_dev->dev_ops = &eth_igb_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
eth_dev->dev_ops = &igbvf_eth_dev_ops;
eth_dev->rx_pkt_burst = &eth_igb_recv_pkts;
eth_dev->tx_pkt_burst = &eth_igb_xmit_pkts;
+ eth_dev->tx_pkt_prepare = &eth_igb_prep_pkts;
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
+#include <rte_net.h>
#include <rte_string_fns.h>
#include "e1000_logs.h"
PKT_TX_L4_MASK | \
PKT_TX_TCP_SEG)
+#define IGB_TX_OFFLOAD_NOTSUP_MASK \
+ (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
*/
return nb_tx;
}
+/*********************************************************************
+ *
+ * TX prep functions
+ *
+ **********************************************************************/
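+/*
+ * Called through rte_eth_tx_prepare() ahead of rte_eth_tx_burst(): check
+ * TSO requests against the hardware limits on MSS and total header length,
+ * reject unsupported offload flags, and fill in the pseudo-header checksums
+ * required by the checksum offloads.
+ */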
+uint16_t
+eth_igb_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ int i, ret;
+ struct rte_mbuf *m;
+
+ for (i = 0; i < nb_pkts; i++) {
+ m = tx_pkts[i];
+
+ /* Check some limitations for TSO in hardware */
+ if (m->ol_flags & PKT_TX_TCP_SEG)
+ if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
+ (m->l2_len + m->l3_len + m->l4_len >
+ IGB_TSO_MAX_HDRLEN)) {
+ rte_errno = EINVAL;
+ return i;
+ }
+
+ if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
+ rte_errno = ENOTSUP;
+ return i;
+ }
+
+#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+ ret = rte_validate_tx_offload(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+#endif
+ ret = rte_net_intel_cksum_prepare(m);
+ if (ret != 0) {
+ rte_errno = -ret;
+ return i;
+ }
+ }
+
+ return i;
+}
+
/*********************************************************************
*
* RX functions
igb_reset_tx_queue(txq, dev);
dev->tx_pkt_burst = eth_igb_xmit_pkts;
+ dev->tx_pkt_prepare = &eth_igb_prep_pkts;
dev->data->tx_queues[queue_idx] = txq;
return 0;
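For context, and not part of the patch above: a minimal sketch of how an application is expected to use the new callback. The burst is first run through the generic rte_eth_tx_prepare(), which dispatches to eth_em_prep_pkts()/eth_igb_prep_pkts() on these devices; rte_errno is consulted when fewer packets are prepared than submitted. The send_burst() helper and its parameter types are hypothetical, and the exact width of port_id depends on the DPDK release.

/* Hypothetical helper; assumes <stdio.h>, rte_ethdev.h and rte_errno.h. */
static uint16_t
send_burst(uint16_t port_id, uint16_t queue_id,
	   struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	/* Validate offload flags and fix up pseudo-header checksums in place. */
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	if (nb_prep != nb_pkts)
		printf("tx_prepare rejected pkts[%u]: %s\n",
		       (unsigned)nb_prep, rte_strerror(rte_errno));

	/* Transmit only the packets that passed preparation; the rest stay
	 * owned by the caller. */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}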