#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include "e1000_ethdev.h"
#ifdef RTE_LIBRTE_IEEE1588
-#define IGB_TX_IEEE1588_TMST PKT_TX_IEEE1588_TMST
+#define IGB_TX_IEEE1588_TMST RTE_MBUF_F_TX_IEEE1588_TMST
#else
#define IGB_TX_IEEE1588_TMST 0
#endif
/* Bit Mask to indicate what bits required for building TX context */
-#define IGB_TX_OFFLOAD_MASK ( \
- PKT_TX_OUTER_IPV6 | \
- PKT_TX_OUTER_IPV4 | \
- PKT_TX_IPV6 | \
- PKT_TX_IPV4 | \
- PKT_TX_VLAN_PKT | \
- PKT_TX_IP_CKSUM | \
- PKT_TX_L4_MASK | \
- PKT_TX_TCP_SEG | \
+#define IGB_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_OUTER_IPV6 | \
+ RTE_MBUF_F_TX_OUTER_IPV4 | \
+ RTE_MBUF_F_TX_IPV6 | \
+ RTE_MBUF_F_TX_IPV4 | \
+ RTE_MBUF_F_TX_VLAN | \
+ RTE_MBUF_F_TX_IP_CKSUM | \
+ RTE_MBUF_F_TX_L4_MASK | \
+ RTE_MBUF_F_TX_TCP_SEG | \
IGB_TX_IEEE1588_TMST)
#define IGB_TX_OFFLOAD_NOTSUP_MASK \
- (PKT_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
+ (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IGB_TX_OFFLOAD_MASK)
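Everything inside RTE_MBUF_F_TX_OFFLOAD_MASK but outside IGB_TX_OFFLOAD_MASK is unsupported by this driver, so the Tx prepare path can reject such packets up front. A minimal sketch of that check, assuming the usual DPDK tx_pkt_prepare loop shape (the nb_pkts/tx_pkts names are illustrative, not necessarily this file's exact code):

    uint16_t i;
    struct rte_mbuf *m;

    for (i = 0; i < nb_pkts; i++) {
        m = tx_pkts[i];
        /* Any requested offload outside the supported set? */
        if (m->ol_flags & IGB_TX_OFFLOAD_NOTSUP_MASK) {
            rte_errno = ENOTSUP;
            return i; /* packets validated before the failure */
        }
    }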
/**
* Structure associated with each descriptor of the RX ring of a RX queue.
static inline uint64_t
check_tso_para(uint64_t ol_req, union igb_tx_offload ol_para)
{
- if (!(ol_req & PKT_TX_TCP_SEG))
+ if (!(ol_req & RTE_MBUF_F_TX_TCP_SEG))
return ol_req;
if ((ol_para.tso_segsz > IGB_TSO_MAX_MSS) || (ol_para.l2_len +
ol_para.l3_len + ol_para.l4_len > IGB_TSO_MAX_HDRLEN)) {
- ol_req &= ~PKT_TX_TCP_SEG;
- ol_req |= PKT_TX_TCP_CKSUM;
+ ol_req &= ~RTE_MBUF_F_TX_TCP_SEG;
+ ol_req |= RTE_MBUF_F_TX_TCP_CKSUM;
}
return ol_req;
}
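In other words, an over-limit TSO request is quietly demoted to a plain TCP checksum offload instead of being rejected. A hedged caller-side sketch, with made-up header sizes and MSS:

    union igb_tx_offload tx_offload = { .data = 0 };
    uint64_t ol_flags = RTE_MBUF_F_TX_TCP_SEG | RTE_MBUF_F_TX_IP_CKSUM;

    tx_offload.l2_len = 14;      /* Ethernet */
    tx_offload.l3_len = 20;      /* IPv4 */
    tx_offload.l4_len = 20;      /* TCP */
    tx_offload.tso_segsz = 1448;

    /* If tso_segsz or the summed header length were out of range,
     * RTE_MBUF_F_TX_TCP_SEG would be replaced by RTE_MBUF_F_TX_TCP_CKSUM
     * in the returned flag set.
     */
    ol_flags = check_tso_para(ol_flags, tx_offload);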
/* Specify which HW CTX to upload. */
mss_l4len_idx = (ctx_idx << E1000_ADVTXD_IDX_SHIFT);
- if (ol_flags & PKT_TX_VLAN_PKT)
+ if (ol_flags & RTE_MBUF_F_TX_VLAN)
tx_offload_mask.data |= TX_VLAN_CMP_MASK;
/* check if TCP segmentation required for this packet */
- if (ol_flags & PKT_TX_TCP_SEG) {
+ if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
/* implies IP cksum in IPv4 */
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4 |
E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= tx_offload.tso_segsz << E1000_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= tx_offload.l4_len << E1000_ADVTXD_L4LEN_SHIFT;
} else { /* no TSO, check if hardware checksum is needed */
- if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (ol_flags & (RTE_MBUF_F_TX_IP_CKSUM | RTE_MBUF_F_TX_L4_MASK))
tx_offload_mask.data |= TX_MACIP_LEN_CMP_MASK;
- if (ol_flags & PKT_TX_IP_CKSUM)
+ if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
type_tucmd_mlhl = E1000_ADVTXD_TUCMD_IPV4;
- switch (ol_flags & PKT_TX_L4_MASK) {
- case PKT_TX_UDP_CKSUM:
+ switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+ case RTE_MBUF_F_TX_UDP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_udp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_TCP_CKSUM:
+ case RTE_MBUF_F_TX_TCP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_tcp_hdr)
<< E1000_ADVTXD_L4LEN_SHIFT;
break;
- case PKT_TX_SCTP_CKSUM:
+ case RTE_MBUF_F_TX_SCTP_CKSUM:
type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP |
E1000_ADVTXD_DTYP_CTXT | E1000_ADVTXD_DCMD_DEXT;
mss_l4len_idx |= sizeof(struct rte_sctp_hdr)
static const uint32_t l3_olinfo[2] = {0, E1000_ADVTXD_POPTS_IXSM};
uint32_t tmp;
- tmp = l4_olinfo[(ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM];
- tmp |= l3_olinfo[(ol_flags & PKT_TX_IP_CKSUM) != 0];
- tmp |= l4_olinfo[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ tmp = l4_olinfo[(ol_flags & RTE_MBUF_F_TX_L4_MASK) != RTE_MBUF_F_TX_L4_NO_CKSUM];
+ tmp |= l3_olinfo[(ol_flags & RTE_MBUF_F_TX_IP_CKSUM) != 0];
+ tmp |= l4_olinfo[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return tmp;
}
uint32_t cmdtype;
static uint32_t vlan_cmd[2] = {0, E1000_ADVTXD_DCMD_VLE};
static uint32_t tso_cmd[2] = {0, E1000_ADVTXD_DCMD_TSE};
- cmdtype = vlan_cmd[(ol_flags & PKT_TX_VLAN_PKT) != 0];
- cmdtype |= tso_cmd[(ol_flags & PKT_TX_TCP_SEG) != 0];
+ cmdtype = vlan_cmd[(ol_flags & RTE_MBUF_F_TX_VLAN) != 0];
+ cmdtype |= tso_cmd[(ol_flags & RTE_MBUF_F_TX_TCP_SEG) != 0];
return cmdtype;
}
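Both helpers above rely on the same branchless idiom: a flag test such as (ol_flags & F) != 0 evaluates to 0 or 1 and indexes a two-entry table, so no conditional jump is needed. A standalone sketch of the pattern (the flag and command values are invented):

    #include <stdint.h>

    #define EX_F_VLAN   (1ULL << 0)     /* hypothetical offload flag */
    #define EX_CMD_VLE  0x40000000u     /* hypothetical descriptor bit */

    static uint32_t
    ex_cmdtype(uint64_t ol_flags)
    {
        static const uint32_t vlan_cmd[2] = {0, EX_CMD_VLE};

        /* The comparison yields 0 or 1 and selects the table entry. */
        return vlan_cmd[(ol_flags & EX_F_VLAN) != 0];
    }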
*/
cmd_type_len = txq->txd_type |
E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT;
- if (tx_ol_req & PKT_TX_TCP_SEG)
+ if (tx_ol_req & RTE_MBUF_F_TX_TCP_SEG)
pkt_len -= (tx_pkt->l2_len + tx_pkt->l3_len + tx_pkt->l4_len);
olinfo_status = (pkt_len << E1000_ADVTXD_PAYLEN_SHIFT);
#if defined(RTE_LIBRTE_IEEE1588)
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+ if (ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
cmd_type_len |= E1000_ADVTXD_MAC_TSTAMP;
#endif
if (tx_ol_req) {
m = tx_pkts[i];
/* Check some limitations for TSO in hardware */
- if (m->ol_flags & PKT_TX_TCP_SEG)
+ if (m->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
if ((m->tso_segsz > IGB_TSO_MAX_MSS) ||
(m->l2_len + m->l3_len + m->l4_len >
IGB_TSO_MAX_HDRLEN)) {
return i;
}
-#ifdef RTE_LIBRTE_ETHDEV_DEBUG
+#ifdef RTE_ETHDEV_DEBUG_TX
ret = rte_validate_tx_offload(m);
if (ret != 0) {
rte_errno = -ret;
static inline uint64_t
rx_desc_hlen_type_rss_to_pkt_flags(struct igb_rx_queue *rxq, uint32_t hl_tp_rs)
{
- uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : PKT_RX_RSS_HASH;
+ uint64_t pkt_flags = ((hl_tp_rs & 0x0F) == 0) ? 0 : RTE_MBUF_F_RX_RSS_HASH;
#if defined(RTE_LIBRTE_IEEE1588)
static uint32_t ip_pkt_etqf_map[8] = {
- 0, 0, 0, PKT_RX_IEEE1588_PTP,
+ 0, 0, 0, RTE_MBUF_F_RX_IEEE1588_PTP,
0, 0, 0, 0,
};
/* Check if VLAN present */
pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ?
- PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED : 0);
+ RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED : 0);
#if defined(RTE_LIBRTE_IEEE1588)
if (rx_status & E1000_RXD_STAT_TMST)
- pkt_flags = pkt_flags | PKT_RX_IEEE1588_TMST;
+ pkt_flags = pkt_flags | RTE_MBUF_F_RX_IEEE1588_TMST;
#endif
return pkt_flags;
}
*/
static uint64_t error_to_pkt_flags_map[4] = {
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_GOOD,
- PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
+ RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD
};
return error_to_pkt_flags_map[(rx_status >>
E1000_RXD_ERR_CKSUM_BIT) & E1000_RXD_ERR_CKSUM_MSK];
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
first_seg->hash.rss = rxd.wb.lower.hi_dword.rss;
/*
- * The vlan_tci field is only valid when PKT_RX_VLAN is
+ * The vlan_tci field is only valid when RTE_MBUF_F_RX_VLAN is
* set in the pkt_flags field and must be in CPU byte order.
*/
if ((staterr & rte_cpu_to_le_32(E1000_RXDEXT_STATERR_LB)) &&
uint16_t tx_id; /* Current segment being processed. */
uint16_t tx_last; /* Last segment in the current packet. */
uint16_t tx_next; /* First segment of the next packet. */
- int count;
+ int count = 0;
- if (txq != NULL) {
- count = 0;
- sw_ring = txq->sw_ring;
- txr = txq->tx_ring;
+ if (!txq)
+ return -ENODEV;
- /*
- * tx_tail is the last sent packet on the sw_ring. Goto the end
- * of that packet (the last segment in the packet chain) and
- * then the next segment will be the start of the oldest segment
- * in the sw_ring. This is the first packet that will be
- * attempted to be freed.
- */
+ sw_ring = txq->sw_ring;
+ txr = txq->tx_ring;
- /* Get last segment in most recently added packet. */
- tx_first = sw_ring[txq->tx_tail].last_id;
+ /* tx_tail is the last sent packet on the sw_ring. Go to the end
+ * of that packet (the last segment in the packet chain) and
+ * then the next segment will be the start of the oldest segment
+ * in the sw_ring. This is the first packet that will be
+ * attempted to be freed.
+ */
- /* Get the next segment, which is the oldest segment in ring. */
- tx_first = sw_ring[tx_first].next_id;
+ /* Get last segment in most recently added packet. */
+ tx_first = sw_ring[txq->tx_tail].last_id;
- /* Set the current index to the first. */
- tx_id = tx_first;
+ /* Get the next segment, which is the oldest segment in ring. */
+ tx_first = sw_ring[tx_first].next_id;
- /*
- * Loop through each packet. For each packet, verify that an
- * mbuf exists and that the last segment is free. If so, free
- * it and move on.
- */
- while (1) {
- tx_last = sw_ring[tx_id].last_id;
-
- if (sw_ring[tx_last].mbuf) {
- if (txr[tx_last].wb.status &
- E1000_TXD_STAT_DD) {
- /*
- * Increment the number of packets
- * freed.
- */
- count++;
-
- /* Get the start of the next packet. */
- tx_next = sw_ring[tx_last].next_id;
-
- /*
- * Loop through all segments in a
- * packet.
- */
- do {
- rte_pktmbuf_free_seg(sw_ring[tx_id].mbuf);
+ /* Set the current index to the first. */
+ tx_id = tx_first;
+
+ /* Loop through each packet. For each packet, verify that an
+ * mbuf exists and that the last segment is free. If so, free
+ * it and move on.
+ */
+ while (1) {
+ tx_last = sw_ring[tx_id].last_id;
+
+ if (sw_ring[tx_last].mbuf) {
+ if (txr[tx_last].wb.status &
+ E1000_TXD_STAT_DD) {
+ /* Increment the number of packets
+ * freed.
+ */
+ count++;
+
+ /* Get the start of the next packet. */
+ tx_next = sw_ring[tx_last].next_id;
+
+ /* Loop through all segments in a
+ * packet.
+ */
+ do {
+ if (sw_ring[tx_id].mbuf) {
+ rte_pktmbuf_free_seg(
+ sw_ring[tx_id].mbuf);
sw_ring[tx_id].mbuf = NULL;
sw_ring[tx_id].last_id = tx_id;
+ }
- /* Move to next segemnt. */
- tx_id = sw_ring[tx_id].next_id;
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
- } while (tx_id != tx_next);
+ } while (tx_id != tx_next);
- if (unlikely(count == (int)free_cnt))
- break;
- } else
- /*
- * mbuf still in use, nothing left to
- * free.
- */
+ if (unlikely(count == (int)free_cnt))
break;
} else {
- /*
- * There are multiple reasons to be here:
- * 1) All the packets on the ring have been
- * freed - tx_id is equal to tx_first
- * and some packets have been freed.
- * - Done, exit
- * 2) Interfaces has not sent a rings worth of
- * packets yet, so the segment after tail is
- * still empty. Or a previous call to this
- * function freed some of the segments but
- * not all so there is a hole in the list.
- * Hopefully this is a rare case.
- * - Walk the list and find the next mbuf. If
- * there isn't one, then done.
+ /* mbuf still in use, nothing left to
+ * free.
*/
- if (likely((tx_id == tx_first) && (count != 0)))
- break;
+ break;
+ }
+ } else {
+ /* There are multiple reasons to be here:
+ * 1) All the packets on the ring have been
+ * freed - tx_id is equal to tx_first
+ * and some packets have been freed.
+ * - Done, exit
+ * 2) Interface has not sent a ring's worth of
+ * packets yet, so the segment after tail is
+ * still empty. Or a previous call to this
+ * function freed some of the segments but
+ * not all so there is a hole in the list.
+ * Hopefully this is a rare case.
+ * - Walk the list and find the next mbuf. If
+ * there isn't one, then done.
+ */
+ if (likely(tx_id == tx_first && count != 0))
+ break;
- /*
- * Walk the list and find the next mbuf, if any.
- */
- do {
- /* Move to next segemnt. */
- tx_id = sw_ring[tx_id].next_id;
+ /* Walk the list and find the next mbuf, if any. */
+ do {
+ /* Move to next segment. */
+ tx_id = sw_ring[tx_id].next_id;
- if (sw_ring[tx_id].mbuf)
- break;
+ if (sw_ring[tx_id].mbuf)
+ break;
- } while (tx_id != tx_first);
+ } while (tx_id != tx_first);
- /*
- * Determine why previous loop bailed. If there
- * is not an mbuf, done.
- */
- if (sw_ring[tx_id].mbuf == NULL)
- break;
- }
+ /* Determine why the previous loop bailed. If there
+ * is no mbuf, we are done.
+ */
+ if (!sw_ring[tx_id].mbuf)
+ break;
}
- } else
- count = -ENODEV;
+ }
return count;
}
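This cleanup routine is what backs the rte_eth_tx_done_cleanup() ethdev API for igb: the application asks the driver to release up to free_cnt transmitted mbufs (0 means no limit) and receives the number freed, or a negative errno. A usage sketch, with illustrative port/queue numbers:

    #include <rte_ethdev.h>

    /* Try to reclaim up to 32 completed Tx mbufs on port 0, queue 0. */
    int nb = rte_eth_tx_done_cleanup(0, 0, 32);
    if (nb < 0) {
        /* e.g. -ENODEV from the queue check above, or -ENOTSUP */
    }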
igb_get_rx_port_offloads_capa(struct rte_eth_dev *dev)
{
uint64_t rx_offload_capa;
+ struct e1000_hw *hw;
+
+ hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- RTE_SET_USED(dev);
rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP |
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_RSS_HASH;
+ if (hw->mac.type == e1000_i350 ||
+ hw->mac.type == e1000_i210 ||
+ hw->mac.type == e1000_i211)
+ rx_offload_capa |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+
return rx_offload_capa;
}
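Applications see this capability set via rte_eth_dev_info_get(), so after this change DEV_RX_OFFLOAD_VLAN_EXTEND is advertised only on i350/i210/i211. A short query sketch (port_id is assumed valid):

    #include <rte_ethdev.h>

    struct rte_eth_dev_info info;
    struct rte_eth_conf conf = {0};

    if (rte_eth_dev_info_get(port_id, &info) == 0 &&
        (info.rx_offload_capa & DEV_RX_OFFLOAD_VLAN_EXTEND))
        /* Extended (double) VLAN can be requested on this port. */
        conf.rxmode.offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;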
* Configure support of jumbo frames, if any.
*/
if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ uint32_t max_len = dev->data->dev_conf.rxmode.max_rx_pkt_len;
+
rctl |= E1000_RCTL_LPE;
/*
* Set maximum packet length by default, and might be updated
* together with enabling/disabling dual VLAN.
*/
- E1000_WRITE_REG(hw, E1000_RLPML,
- dev->data->dev_conf.rxmode.max_rx_pkt_len +
- VLAN_TAG_SIZE);
+ if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ max_len += VLAN_TAG_SIZE;
+
+ E1000_WRITE_REG(hw, E1000_RLPML, max_len);
} else
rctl &= ~E1000_RCTL_LPE;