}
/* Combine the packet header write. VLAN is not considered here */
- mb->vlan_macip.f.l2_len = l2_len;
- mb->vlan_macip.f.l3_len = l3_len;
+ mb->l2_len = l2_len;
+ mb->l3_len = l3_len;
mb->ol_flags = ol_flags;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
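For application code, the change above boils down to writing the flattened mbuf fields directly. A minimal sketch (hypothetical helper, not part of the patch) that prepares one TX mbuf for hardware IP checksum offload and VLAN insertion:

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Hypothetical helper: set TX offload metadata with the flat fields. */
static void
prepare_tx_offload(struct rte_mbuf *m, uint16_t tci)
{
	m->l2_len = sizeof(struct ether_hdr); /* 14 bytes, fits the 7-bit field */
	m->l3_len = sizeof(struct ipv4_hdr);  /* 20 bytes, fits the 9-bit field */
	m->vlan_tci = tci;                    /* tag kept in CPU byte order */
	m->ol_flags = PKT_TX_IP_CKSUM | PKT_TX_VLAN_PKT;
}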
udp_hdr->dgram_len = RTE_CPU_TO_BE_16(pkt_size -
sizeof(*eth_hdr) -
sizeof(*ip_hdr));
- pkt->nb_segs = 1;
- pkt->pkt_len = pkt_size;
- pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- pkts_burst[nb_pkt] = pkt;
+ pkt->nb_segs = 1;
+ pkt->pkt_len = pkt_size;
+ pkt->ol_flags = ol_flags;
+ pkt->vlan_tci = vlan_tci;
+ pkt->l2_len = sizeof(struct ether_hdr);
+ pkt->l3_len = sizeof(struct ipv4_hdr);
+ pkts_burst[nb_pkt] = pkt;
next_flow = (next_flow + 1) % cfg_n_flows;
}
ether_addr_copy(&ports[fs->tx_port].eth_addr,
&eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->l2_len = sizeof(struct ether_hdr);
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
ether_addr_copy(&addr, &eth_hdr->s_addr);
mb->ol_flags = txp->tx_ol_flags;
- mb->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mb->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
- mb->vlan_macip.f.vlan_tci = txp->tx_vlan_id;
+ mb->l2_len = sizeof(struct ether_hdr);
+ mb->l3_len = sizeof(struct ipv4_hdr);
+ mb->vlan_tci = txp->tx_vlan_id;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_rx);
fs->tx_packets += nb_tx;
printf(" - FDIR hash=0x%x - FDIR id=0x%x ",
mb->hash.fdir.hash, mb->hash.fdir.id);
if (ol_flags & PKT_RX_VLAN_PKT)
- printf(" - VLAN tci=0x%x",
- mb->vlan_macip.f.vlan_tci);
+ printf(" - VLAN tci=0x%x", mb->vlan_tci);
printf("\n");
if (ol_flags != 0) {
int rxf;
mb->ol_flags = 0;
mb->data = (char *) mb->buf_addr + RTE_PKTMBUF_HEADROOM;
mb->nb_segs = 1;
- mb->vlan_macip.data = 0;
+ mb->l2_l3_len = 0;
+ mb->vlan_tci = 0;
mb->hash.rss = 0;
}
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
pkt->ol_flags = ol_flags;
- pkt->vlan_macip.f.vlan_tci = vlan_tci;
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci = vlan_tci;
+ pkt->l2_len = sizeof(struct ether_hdr);
+ pkt->l3_len = sizeof(struct ipv4_hdr);
pkts_burst[nb_pkt] = pkt;
}
nb_tx = rte_eth_tx_burst(fs->tx_port, fs->tx_queue, pkts_burst, nb_pkt);
*/
pkt->nb_segs = tx_pkt_nb_segs;
pkt->pkt_len = tx_pkt_length;
- pkt->vlan_macip.f.l2_len = eth_hdr_size;
+ pkt->l2_len = eth_hdr_size;
if (ipv4) {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv4;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ pkt->vlan_tci = ETHER_TYPE_IPv4;
+ pkt->l3_len = sizeof(struct ipv4_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV4_HDR | PKT_RX_VLAN_PKT;
else
pkt->ol_flags = PKT_RX_IPV4_HDR;
} else {
- pkt->vlan_macip.f.vlan_tci = ETHER_TYPE_IPv6;
- pkt->vlan_macip.f.l3_len = sizeof(struct ipv6_hdr);
+ pkt->vlan_tci = ETHER_TYPE_IPv6;
+ pkt->l3_len = sizeof(struct ipv6_hdr);
if (vlan_enabled)
pkt->ol_flags = PKT_RX_IPV6_HDR | PKT_RX_VLAN_PKT;
rte_panic("No headroom in mbuf.\n");
}
- m->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ m->l2_len = sizeof(struct ether_hdr);
/* 02:00:00:00:00:xx */
d_addr_bytes = &eth_hdr->d_addr.addr_bytes[0];
/* Pop Ethernet header */
if (app.ether_hdr_pop_push) {
rte_pktmbuf_adj(m, (uint16_t)sizeof(struct ether_hdr));
- m->vlan_macip.f.l2_len = 0;
- m->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ m->l2_len = 0;
+ m->l3_len = sizeof(struct ipv4_hdr);
}
}
ether_addr_copy(&pkt_meta->nh_arp, &ether_hdr->d_addr);
ether_addr_copy(&local_ether_addr, &ether_hdr->s_addr);
ether_hdr->ether_type = rte_bswap16(ETHER_TYPE_IPv4);
- pkt->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
+ pkt->l2_len = sizeof(struct ether_hdr);
}
static int
dr = &qconf->death_row;
/* prepare mbuf: set up l2_len/l3_len. */
- m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->vlan_macip.f.l3_len = sizeof(*ip_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr);
/* process this fragment. */
mo = rte_ipv4_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr);
dr = &qconf->death_row;
/* prepare mbuf: set up l2_len/l3_len. */
- m->vlan_macip.f.l2_len = sizeof(*eth_hdr);
- m->vlan_macip.f.l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
+ m->l2_len = sizeof(*eth_hdr);
+ m->l3_len = sizeof(*ip_hdr) + sizeof(*frag_hdr);
mo = rte_ipv6_frag_reassemble_packet(tbl, dr, m, tms, ip_hdr, frag_hdr);
if (mo == NULL)
/* copy metadata from source packet*/
hdr->port = pkt->port;
- hdr->vlan_macip = pkt->vlan_macip;
+ hdr->vlan_tci = pkt->vlan_tci;
+ hdr->l2_l3_len = pkt->l2_l3_len;
hdr->hash = pkt->hash;
hdr->ol_flags = pkt->ol_flags;
mbuf->buf_addr = m->buf_addr;
}
mbuf->ol_flags = PKT_TX_VLAN_PKT;
- mbuf->vlan_macip.f.vlan_tci = vlan_tag;
- mbuf->vlan_macip.f.l2_len = sizeof(struct ether_hdr);
- mbuf->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ mbuf->vlan_tci = vlan_tag;
+ mbuf->l2_len = sizeof(struct ether_hdr);
+ mbuf->l3_len = sizeof(struct ipv4_hdr);
MBUF_HEADROOM_UINT32(mbuf) = (uint32_t)desc_idx;
tx_q->m_table[len] = mbuf;
struct rte_mbuf *ms;
/* adjust start of the last fragment data. */
- rte_pktmbuf_adj(mp, (uint16_t)(mp->vlan_macip.f.l2_len +
- mp->vlan_macip.f.l3_len));
+ rte_pktmbuf_adj(mp, (uint16_t)(mp->l2_len + mp->l3_len));
/* chain two fragments. */
ms = rte_pktmbuf_lastseg(mn);
out_pkt->pkt_len - sizeof(struct ipv4_hdr));
out_pkt->ol_flags |= PKT_TX_IP_CKSUM;
- out_pkt->vlan_macip.f.l3_len = sizeof(struct ipv4_hdr);
+ out_pkt->l3_len = sizeof(struct ipv4_hdr);
/* Write the fragment to the output list */
pkts_out[out_pkt_pos] = out_pkt;
/* update ipv4 header for the reassembled packet */
ip_hdr = (struct ipv4_hdr*)(rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->l2_len);
ip_hdr->total_length = rte_cpu_to_be_16((uint16_t)(fp->total_size +
- m->vlan_macip.f.l3_len));
+ m->l3_len));
ip_hdr->fragment_offset = (uint16_t)(ip_hdr->fragment_offset &
rte_cpu_to_be_16(IPV4_HDR_DF_FLAG));
ip_hdr->hdr_checksum = 0;
ip_ofs *= IPV4_HDR_OFFSET_UNITS;
ip_len = (uint16_t)(rte_be_to_cpu_16(ip_hdr->total_length) -
- mb->vlan_macip.f.l3_len);
+ mb->l3_len);
IP_FRAG_LOG(DEBUG, "%s:%d:\n"
"mbuf: %p, tms: %" PRIu64
/* update ipv6 header for the reassembled datagram */
ip_hdr = (struct ipv6_hdr *) (rte_pktmbuf_mtod(m, uint8_t *) +
- m->vlan_macip.f.l2_len);
+ m->l2_len);
ip_hdr->payload_len = rte_cpu_to_be_16(payload_len);
* other headers, so we assume there are no other headers and thus update
* the main IPv6 header instead.
*/
- move_len = m->vlan_macip.f.l2_len + m->vlan_macip.f.l3_len -
- sizeof(*frag_hdr);
+ move_len = m->l2_len + m->l3_len - sizeof(*frag_hdr);
frag_hdr = (struct ipv6_extension_fragment *) (ip_hdr + 1);
ip_hdr->proto = frag_hdr->next_header;
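To make the move_len computation concrete, using the l2_len/l3_len values set earlier in this patch for the IPv6 reassembly path:

/* l2_len = 14 (Ethernet), l3_len = 40 + 8 (IPv6 header + frag header),
 * so move_len = 14 + 48 - 8 = 54 bytes: exactly the headers that
 * precede the fragment header, shifted forward to overwrite it. */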
*/
#define PKT_TX_OFFLOAD_MASK (PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK)
-/** Offload features */
-union rte_vlan_macip {
- uint32_t data;
- struct {
- uint16_t l3_len:9; /**< L3 (IP) Header Length. */
- uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
- uint16_t vlan_tci;
- /**< VLAN Tag Control Identifier (CPU order). */
- } f;
-};
-
-/*
- * Compare mask for vlan_macip_len.data,
- * should be in sync with rte_vlan_macip.f layout.
- * */
-#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN length - 16-bits. */
-#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC length - 7-bits. */
-#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP length - 9-bits. */
-/**< MAC+IP length. */
-#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
-
/**
* The generic rte_mbuf, containing a packet mbuf.
*/
uint32_t pkt_len; /**< Total pkt len: sum of all segment data_len. */
/* offload features, valid for first segment only */
- union rte_vlan_macip vlan_macip;
+ union {
+ uint16_t l2_l3_len; /**< combined L2/L3 lengths as a single field */
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ };
+ };
+ uint16_t vlan_tci; /**< VLAN Tag Control Identifier (CPU order). */
union {
uint32_t rss; /**< RSS hash result if RSS enabled */
struct {
m->next = NULL;
m->pkt_len = 0;
- m->vlan_macip.data = 0;
+ m->l2_l3_len = 0;
+ m->vlan_tci = 0;
m->nb_segs = 1;
m->port = 0xff;
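Because the anonymous union above aliases both length bitfields behind the 16-bit l2_l3_len, resets like this clear both lengths with a single store. A minimal sketch of the aliasing (field widths as defined in the struct above):

#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>

/* Hypothetical demonstration: all three writes touch the same 16 bits. */
static void
show_l2_l3_alias(struct rte_mbuf *m)
{
	m->l3_len = sizeof(struct ipv4_hdr);  /* bits 0..8 of l2_l3_len  */
	m->l2_len = sizeof(struct ether_hdr); /* bits 9..15 of l2_l3_len */
	m->l2_l3_len = 0;                     /* one store clears both   */
}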
mi->data = md->data;
mi->data_len = md->data_len;
mi->port = md->port;
- mi->vlan_macip = md->vlan_macip;
+ mi->vlan_tci = md->vlan_tci;
+ mi->l2_l3_len = md->l2_l3_len;
mi->hash = md->hash;
mi->next = NULL;
EM_CTX_NUM = 1, /**< CTX NUM */
};
+/** Offload features */
+union em_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l3_len:9; /**< L3 (IP) Header Length. */
+ uint16_t l2_len:7; /**< L2 (MAC) Header Length. */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare masks for em_vlan_macip.data;
+ * must be kept in sync with the em_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
struct em_ctx_info {
- uint16_t flags; /**< ol_flags related to context build. */
- uint32_t cmp_mask; /**< compare mask */
- union rte_vlan_macip hdrlen; /**< L2 and L3 header lenghts */
+ uint16_t flags; /**< ol_flags related to context build. */
+ uint32_t cmp_mask; /**< compare mask */
+ union em_vlan_macip hdrlen; /**< L2 and L3 header lengths */
};
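The driver keeps this packed layout locally precisely so the cached context can be validated with one masked 32-bit compare. A sketch mirroring the test in what_ctx_update() (condensed, assumed from this file):

/* Sketch: the cached context descriptor still matches if the flags
 * agree and no masked bit of the packed VLAN/L2/L3 word changed. */
static inline int
em_ctx_matches(const struct em_ctx_info *cache, uint16_t flags,
		union em_vlan_macip hdrlen)
{
	return (cache->flags == flags &&
		((cache->hdrlen.data ^ hdrlen.data) & cache->cmp_mask) == 0);
}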
/**
em_set_xmit_ctx(struct em_tx_queue* txq,
volatile struct e1000_context_desc *ctx_txd,
uint16_t flags,
- union rte_vlan_macip hdrlen)
+ union em_vlan_macip hdrlen)
{
uint32_t cmp_mask, cmd_len;
uint16_t ipcse, l2len;
*/
static inline uint32_t
what_ctx_update(struct em_tx_queue *txq, uint16_t flags,
- union rte_vlan_macip hdrlen)
+ union em_vlan_macip hdrlen)
{
/* If match with the current context */
if (likely (txq->ctx_cache.flags == flags &&
uint16_t tx_ol_req;
uint32_t ctx;
uint32_t new_ctx;
- union rte_vlan_macip hdrlen;
+ union em_vlan_macip hdrlen;
txq = tx_queue;
sw_ring = txq->sw_ring;
tx_ol_req = (uint16_t)(ol_flags & (PKT_TX_IP_CKSUM |
PKT_TX_L4_MASK));
if (tx_ol_req) {
- hdrlen = tx_pkt->vlan_macip;
+ hdrlen.f.vlan_tci = tx_pkt->vlan_tci;
+ hdrlen.f.l2_len = tx_pkt->l2_len;
+ hdrlen.f.l3_len = tx_pkt->l3_len;
/* Decide whether to build a new context or reuse the existing one. */
ctx = what_ctx_update(txq, tx_ol_req, hdrlen);
/* Set VLAN Tag offload fields. */
if (ol_flags & PKT_TX_VLAN_PKT) {
cmd_type_len |= E1000_TXD_CMD_VLE;
- popts_spec = tx_pkt->vlan_macip.f.vlan_tci <<
- E1000_TXD_VLAN_SHIFT;
+ popts_spec = tx_pkt->vlan_tci << E1000_TXD_VLAN_SHIFT;
}
if (tx_ol_req) {
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/*
* Store the mbuf address into the next entry of the array
rx_desc_error_to_pkt_flags(rxd.errors));
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci = rte_le_to_cpu_16(rxd.special);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.special);
/* Prefetch data of first segment, if configured to do so. */
rte_packet_prefetch(first_seg->data);
IGB_CTX_NUM = 2, /**< CTX_NUM */
};
+/** Offload features */
+union igb_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l2_l3_len; /**< 7-bit L2 and 9-bit L3 lengths combined */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare masks for igb_vlan_macip.data;
+ * must be kept in sync with the igb_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
struct igb_advctx_info {
uint16_t flags; /**< ol_flags related to context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union rte_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
+ union igb_vlan_macip vlan_macip_lens; /**< vlan, mac & ip length. */
};
/**
volatile union e1000_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
+ union igb_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
uint16_t tx_ol_req;
uint32_t new_ctx = 0;
uint32_t ctx = 0;
- uint32_t vlan_macip_lens;
txq = tx_queue;
sw_ring = txq->sw_ring;
tx_last = (uint16_t) (tx_id + tx_pkt->nb_segs - 1);
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+ vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
/* If a context descriptor needs to be built. */
if (tx_ol_req) {
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
/* Only allocate a context descriptor if required */
new_ctx = (ctx == IGB_CTX_NUM);
ctx = txq->ctx_curr;
}
igbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
txe->last_id = tx_last;
tx_id = txe->next_id;
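Since the driver union must mirror the mbuf's packed bit layout exactly, a compile-time guard is cheap insurance. A sketch of checks one could add (an assumption, not part of the patch):

#include <rte_common.h>
#include <rte_mbuf.h>

/* Hypothetical compile-time guards: the packed driver union must stay
 * 32 bits wide, and the mbuf's l2_l3_len alias must stay 16 bits. */
static inline void
igb_layout_check(void)
{
	RTE_BUILD_BUG_ON(sizeof(union igb_vlan_macip) != sizeof(uint32_t));
	RTE_BUILD_BUG_ON(sizeof(((struct rte_mbuf *)0)->l2_l3_len) !=
			sizeof(uint16_t));
}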
rxm->hash.rss = rxd.wb.lower.hi_dword.rss;
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
I40E_RXD_QW1_LENGTH_PBUF_SHIFT) - rxq->crc_len;
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rx_status &
+ mb->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxdp[j].wb.qword0.lo_dword.l2tag1) : 0;
rxm->data_len = rx_packet_len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = rx_status &
+ rxm->vlan_tci = rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
}
first_seg->port = rxq->port_id;
- first_seg->vlan_macip.f.vlan_tci = (rx_status &
+ first_seg->vlan_tci = (rx_status &
(1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
rte_le_to_cpu_16(rxd.wb.qword0.lo_dword.l2tag1) : 0;
pkt_flags = i40e_rxd_status_to_pkt_flags(qword1);
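On the receive side the same pattern repeats across PMDs: vlan_tci is only populated when the descriptor carried a tag, so applications should gate on the flag. A minimal sketch (hypothetical handler):

#include <stdio.h>
#include <rte_mbuf.h>

/* Hypothetical handler: vlan_tci is meaningful only when the PMD
 * set PKT_RX_VLAN_PKT for this packet. */
static void
handle_rx_vlan(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_VLAN_PKT)
		printf("stripped VLAN tci=0x%x\n", m->vlan_tci);
}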
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->vlan_macip.f.l2_len;
- l3_len = tx_pkt->vlan_macip.f.l3_len;
+ l2_len = tx_pkt->l2_len;
+ l3_len = tx_pkt->l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
/* Descriptor based VLAN insertion */
if (ol_flags & PKT_TX_VLAN_PKT) {
- tx_flags |= tx_pkt->vlan_macip.f.vlan_tci <<
- I40E_TX_FLAG_L2TAG1_SHIFT;
+ tx_flags |= tx_pkt->vlan_tci <<
+ I40E_TX_FLAG_L2TAG1_SHIFT;
tx_flags |= I40E_TX_FLAG_INSERT_VLAN;
td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
td_tag = (tx_flags & I40E_TX_FLAG_L2TAG1_MASK) >>
volatile union ixgbe_adv_tx_desc *txd;
struct rte_mbuf *tx_pkt;
struct rte_mbuf *m_seg;
+ union ixgbe_vlan_macip vlan_macip_lens;
uint64_t buf_dma_addr;
uint32_t olinfo_status;
uint32_t cmd_type_len;
uint16_t nb_tx;
uint16_t nb_used;
uint16_t tx_ol_req;
- uint32_t vlan_macip_lens;
uint32_t ctx = 0;
uint32_t new_ctx;
* are needed for offload functionality.
*/
ol_flags = tx_pkt->ol_flags;
- vlan_macip_lens = tx_pkt->vlan_macip.data;
+ vlan_macip_lens.f.vlan_tci = tx_pkt->vlan_tci;
+ vlan_macip_lens.f.l2_l3_len = tx_pkt->l2_l3_len;
/* If hardware offload required */
tx_ol_req = (uint16_t)(ol_flags & PKT_TX_OFFLOAD_MASK);
if (tx_ol_req) {
/* Decide whether to build a new context or reuse the existing one. */
ctx = what_advctx_update(txq, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
/* Only allocate a context descriptor if required */
new_ctx = (ctx == IXGBE_CTX_NUM);
ctx = txq->ctx_curr;
}
ixgbe_set_xmit_ctx(txq, ctx_txd, tx_ol_req,
- vlan_macip_lens);
+ vlan_macip_lens.data);
txe->last_id = tx_last;
tx_id = txe->next_id;
rxq->crc_len);
mb->data_len = pkt_len;
mb->pkt_len = pkt_len;
- mb->vlan_macip.f.vlan_tci = rxdp[j].wb.upper.vlan;
+ mb->vlan_tci = rxdp[j].wb.upper.vlan;
mb->hash.rss = rxdp[j].wb.lower.hi_dword.rss;
/* convert descriptor fields to rte mbuf flags */
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
/* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
* The vlan_tci field is only valid when PKT_RX_VLAN_PKT is
* set in the pkt_flags field.
*/
- first_seg->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16(rxd.wb.upper.vlan);
+ first_seg->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan);
hlen_type_rss = rte_le_to_cpu_32(rxd.wb.lower.lo_dword.data);
pkt_flags = rx_desc_hlen_type_rss_to_pkt_flags(hlen_type_rss);
pkt_flags = (uint16_t)(pkt_flags |
IXGBE_CTX_NUM = 2, /**< CTX NUMBER */
};
+/** Offload features */
+union ixgbe_vlan_macip {
+ uint32_t data;
+ struct {
+ uint16_t l2_l3_len; /**< combined 9-bit L3 and 7-bit L2 lengths */
+ uint16_t vlan_tci;
+ /**< VLAN Tag Control Identifier (CPU order). */
+ } f;
+};
+
+/*
+ * Compare masks for ixgbe_vlan_macip.data;
+ * must be kept in sync with the ixgbe_vlan_macip.f layout.
+ */
+#define TX_VLAN_CMP_MASK 0xFFFF0000 /**< VLAN TCI - 16 bits. */
+#define TX_MAC_LEN_CMP_MASK 0x0000FE00 /**< MAC (L2) length - 7 bits. */
+#define TX_IP_LEN_CMP_MASK 0x000001FF /**< IP (L3) length - 9 bits. */
+/** MAC+IP length. */
+#define TX_MACIP_LEN_CMP_MASK (TX_MAC_LEN_CMP_MASK | TX_IP_LEN_CMP_MASK)
+
/**
* Structure to check if a new context needs to be built
*/
struct ixgbe_advctx_info {
uint16_t flags; /**< ol_flags for context build. */
uint32_t cmp_mask; /**< compare mask for vlan_macip_lens */
- union rte_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
+ union ixgbe_vlan_macip vlan_macip_lens; /**< vlan, mac ip length. */
};
/**
rte_pktmbuf_mtod(rxm, void *));
#endif
/* Copy vlan tag in packet buffer */
- rxm->vlan_macip.f.vlan_tci =
- rte_le_to_cpu_16((uint16_t)rcd->tci);
+ rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
} else
rxm->ol_flags = 0;
rxm->pkt_len = (uint16_t)rcd->len;
rxm->data_len = (uint16_t)rcd->len;
rxm->port = rxq->port_id;
- rxm->vlan_macip.f.vlan_tci = 0;
+ rxm->vlan_tci = 0;
rxm->data = (char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM;
rx_pkts[nb_rx++] = rxm;
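Taken together, the application-visible change is a mechanical field mapping; a summary sketch for code being migrated (old packed accessors on the left):

/* Migration sketch: old packed accessors vs. new flat mbuf fields.
 *
 *   m->vlan_macip.f.l2_len   = v;  ->  m->l2_len    = v;
 *   m->vlan_macip.f.l3_len   = v;  ->  m->l3_len    = v;
 *   m->vlan_macip.f.vlan_tci = v;  ->  m->vlan_tci  = v;
 *   m->vlan_macip.data       = 0;  ->  m->l2_l3_len = 0;
 *                                      m->vlan_tci  = 0;
 */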