static inline void
rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d)
{
- uint16_t ptype;
- static const uint16_t pt_lut[] = { 0,
- PKT_RX_IPV4_HDR, PKT_RX_IPV4_HDR_EXT,
- PKT_RX_IPV6_HDR, PKT_RX_IPV6_HDR_EXT,
- 0, 0, 0
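+ /* map the hardware PKTTYPE field directly to an mbuf packet_type;
+  * entries not listed below stay 0, i.e. RTE_PTYPE_UNKNOWN */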
+ static const uint32_t
+ ptype_table[FM10K_RXD_PKTTYPE_MASK >> FM10K_RXD_PKTTYPE_SHIFT]
+ __rte_cache_aligned = {
+ [FM10K_PKTTYPE_OTHER] = RTE_PTYPE_L2_ETHER,
+ [FM10K_PKTTYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+ [FM10K_PKTTYPE_IPV4_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4_EXT,
+ [FM10K_PKTTYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+ [FM10K_PKTTYPE_IPV6_EX] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6_EXT,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_TCP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+ [FM10K_PKTTYPE_IPV4 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+ [FM10K_PKTTYPE_IPV6 | FM10K_PKTTYPE_UDP] = RTE_PTYPE_L2_ETHER |
+ RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};
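+ /* one table lookup resolves the mbuf packet type from the PKTTYPE bits */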
+ m->packet_type = ptype_table[(d->w.pkt_info & FM10K_RXD_PKTTYPE_MASK)
+ >> FM10K_RXD_PKTTYPE_SHIFT];
+
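+ /* RSS hash and receive-error status translate directly to offload flags */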
if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK)
m->ol_flags |= PKT_RX_RSS_HASH;
if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE))
m->ol_flags |= PKT_RX_RECIP_ERR;
-
- ptype = (d->d.data & FM10K_RXD_PKTTYPE_MASK_L3) >>
- FM10K_RXD_PKTTYPE_SHIFT;
- m->ol_flags |= pt_lut[(uint8_t)ptype];
}
static inline void tx_xmit_pkt(struct fm10k_tx_queue *q, struct rte_mbuf *mb)
{
uint16_t last_id;
- uint8_t flags;
+ uint8_t flags, hdrlen;
/* always set the LAST flag on the last descriptor used to
* transmit the packet */
flags = FM10K_TXD_FLAG_LAST;
last_id = q->next_free + mb->nb_segs - 1;
if (last_id >= q->nb_desc)
last_id = last_id - q->nb_desc;
/* set checksum flags on first descriptor of packet. SCTP checksum
* offload is not supported, but we do not explicitly check for this
* case in favor of greatly simplified processing. */
- if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
+ if (mb->ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK | PKT_TX_TCP_SEG))
q->hw_ring[q->next_free].flags |= FM10K_TXD_FLAG_CSUM;
/* set vlan if requested */
if (mb->ol_flags & PKT_TX_VLAN_PKT)
q->hw_ring[q->next_free].vlan = mb->vlan_tci;
q->sw_ring[q->next_free] = mb;
q->hw_ring[q->next_free].buffer_addr =
rte_cpu_to_le_64(MBUF_DMA_ADDR(mb));
q->hw_ring[q->next_free].buflen =
rte_cpu_to_le_16(rte_pktmbuf_data_len(mb));
+
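+ /* TSO: program MSS and total header length; if either falls outside
+  * the range the hardware supports, leave mss/hdrlen unset so the frame
+  * is sent with checksum offload only */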
+ if (mb->ol_flags & PKT_TX_TCP_SEG) {
+ hdrlen = mb->outer_l2_len + mb->outer_l3_len + mb->l2_len +
+ mb->l3_len + mb->l4_len;
+ if (q->hw_ring[q->next_free].flags & FM10K_TXD_FLAG_FTAG)
+ hdrlen += sizeof(struct fm10k_ftag);
+
+ if (likely((hdrlen >= FM10K_TSO_MIN_HEADERLEN) &&
+ (hdrlen <= FM10K_TSO_MAX_HEADERLEN) &&
+ (mb->tso_segsz >= FM10K_TSO_MINMSS))) {
+ q->hw_ring[q->next_free].mss = mb->tso_segsz;
+ q->hw_ring[q->next_free].hdrlen = hdrlen;
+ }
+ }
+
if (++q->next_free == q->nb_desc)
q->next_free = 0;
- q->hw_ring[last_id].flags = flags;
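+ /* OR in the accumulated flags: when the packet fits in a single
+  * descriptor, last_id is the same slot that already carries the
+  * CSUM/VLAN/FTAG bits, and a plain store would discard them */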
+ q->hw_ring[last_id].flags |= flags;
}
uint16_t
fm10k_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
struct fm10k_tx_queue *q = tx_queue;
struct rte_mbuf *mb;
uint16_t count;

for (count = 0; count < nb_pkts; ++count) {
mb = tx_pkts[count];
/* running low on descriptors? try to free some... */
- if (q->nb_free < q->free_trigger)
+ if (q->nb_free < q->free_thresh)
tx_free_descriptors(q);
/* make sure there are enough free descriptors to transmit the