#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM)
+
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
uint16_t flexbh, flexbl;
-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
- mb->hash.fdir.hi =
- rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
-#else
flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;
if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
flags |= PKT_RX_FDIR_ID;
} else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
mb->hash.fdir.hi =
rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
flags |= PKT_RX_FDIR_FLX;
}
if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
mb->hash.fdir.lo =
rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
flags |= PKT_RX_FDIR_FLX;
}
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
#endif
return flags;
}
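
For context, the flags built here are merged into the mbuf's ol_flags on the receive path. A minimal sketch of consuming the flow director report in an application, using only the flags and fields that appear above (the helper name is illustrative, not from the patch):

#include <inttypes.h>
#include <stdio.h>
#include <rte_mbuf.h>

/* Illustrative consumer of the FDIR report built by i40e_rxd_build_fdir(). */
static void
handle_fdir_report(const struct rte_mbuf *m)
{
	if (m->ol_flags & PKT_RX_FDIR_ID)
		/* Descriptor reported the matched filter ID. */
		printf("fdir id: %" PRIu32 "\n", m->hash.fdir.hi);
	else if (m->ol_flags & PKT_RX_FDIR_FLX)
		/* Descriptor reported flexible payload bytes instead. */
		printf("flex: hi=%" PRIu32 " lo=%" PRIu32 "\n",
			m->hash.fdir.hi, m->hash.fdir.lo);
}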
uint32_t *td_offset,
uint8_t l2_len,
uint16_t l3_len,
- uint8_t inner_l2_len,
- uint16_t inner_l3_len,
+ uint8_t outer_l2_len,
+ uint16_t outer_l3_len,
uint32_t *cd_tunneling)
{
if (!l2_len) {
PMD_DRV_LOG(DEBUG, "L2 length set to 0");
return;
}
- *td_offset |= (l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (!l3_len) {
PMD_DRV_LOG(DEBUG, "L3 length set to 0");
return;
}
- /* VXLAN packet TX checksum offload */
- if (unlikely(ol_flags & PKT_TX_VXLAN_CKSUM)) {
- uint8_t l4tun_len;
+ /* UDP tunneling packet TX checksum offload */
+ if (unlikely(ol_flags & PKT_TX_UDP_TUNNEL_PKT)) {
- l4tun_len = ETHER_VXLAN_HLEN + inner_l2_len;
+ *td_offset |= (outer_l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- if (ol_flags & PKT_TX_IPV4_CSUM)
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_IPV6)
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* Now set the ctx descriptor fields */
- *cd_tunneling |= (l3_len >> 2) <<
+ *cd_tunneling |= (outer_l3_len >> 2) <<
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
I40E_TXD_CTX_UDP_TUNNELING |
- (l4tun_len >> 1) <<
+ (l2_len >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
- l3_len = inner_l3_len;
- }
+ } else
+ *td_offset |= (l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
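
The shift counts above come from the descriptor field units: MACLEN and NATLEN are expressed in 2-byte words (hence the >> 1) and the IP length fields in 4-byte words (hence the >> 2). A worked example for a VXLAN frame, assuming the mbuf convention where l2_len covers the outer UDP, VXLAN and inner Ethernet headers (values are illustrative):

#include <stdint.h>

/* Illustrative lengths for VXLAN over outer IPv4: */
uint8_t  outer_l2_len = 14;     /* outer Ethernet               */
uint16_t outer_l3_len = 20;     /* outer IPv4                   */
uint8_t  l2_len = 8 + 8 + 14;   /* UDP + VXLAN + inner Ethernet */
uint16_t l3_len = 20;           /* inner IPv4                   */

/* Field values as packed by the code above:
 *   MACLEN    = outer_l2_len >> 1 = 7  (2-byte words)
 *   EXT_IPLEN = outer_l3_len >> 2 = 5  (4-byte words)
 *   NATLEN    = l2_len       >> 1 = 15 (2-byte words)
 *   IPLEN     = l3_len       >> 2 = 5  (4-byte words)
 */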
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IPV4_CSUM) {
+ if (ol_flags & PKT_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
*td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
{
uint64_t mask = 0ULL;
- if (flags | PKT_TX_VXLAN_CKSUM)
- mask |= PKT_TX_VXLAN_CKSUM;
+ mask |= PKT_TX_OUTER_IP_CKSUM;
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
#endif
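
With the elided tail of this hunk filled in, the helper after the patch presumably reads as follows (a sketch: the final mask test is assumed from the callers, which treat a non-zero return as "one context descriptor needed"):

static inline uint8_t
i40e_calc_context_desc(uint64_t flags)
{
	uint64_t mask = 0ULL;

	mask |= PKT_TX_OUTER_IP_CKSUM;
#ifdef RTE_LIBRTE_IEEE1588
	mask |= PKT_TX_IEEE1588_TMST;
#endif
	/* One context descriptor is needed if any masked flag is set. */
	return (flags & mask) ? 1 : 0;
}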
uint64_t ol_flags;
uint8_t l2_len;
uint16_t l3_len;
- uint8_t inner_l2_len;
- uint16_t inner_l3_len;
+ uint8_t outer_l2_len;
+ uint16_t outer_l3_len;
uint16_t nb_used;
uint16_t nb_ctx;
uint16_t tx_last;
ol_flags = tx_pkt->ol_flags;
l2_len = tx_pkt->l2_len;
- inner_l2_len = tx_pkt->inner_l2_len;
l3_len = tx_pkt->l3_len;
- inner_l3_len = tx_pkt->inner_l3_len;
+ outer_l2_len = tx_pkt->outer_l2_len;
+ outer_l3_len = tx_pkt->outer_l3_len;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
/* Enable checksum offloading */
cd_tunneling_params = 0;
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- l2_len, l3_len, inner_l2_len,
- inner_l3_len,
- &cd_tunneling_params);
+ if (unlikely(ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)) {
+ i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+ l2_len, l3_len, outer_l2_len,
+ outer_l3_len,
+ &cd_tunneling_params);
+ }
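
For reference, a sender now fills the generic outer/inner length fields instead of the removed inner_* ones. A minimal sketch of preparing a tunneled mbuf under these flags (values illustrative, assuming outer IPv4 and the generic tunnel indicator used above; the helper name is not from the patch):

#include <rte_mbuf.h>

/* Illustrative only: request checksum offload for a VXLAN-encapsulated
 * packet before handing it to rte_eth_tx_burst(). */
static void
request_tunnel_cksum(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_UDP_TUNNEL_PKT |   /* tunneled frame      */
		PKT_TX_OUTER_IP_CKSUM |          /* outer IPv4 checksum */
		PKT_TX_IP_CKSUM |                /* inner IPv4 checksum */
		PKT_TX_UDP_CKSUM;                /* inner L4 checksum   */
	m->outer_l2_len = 14;       /* outer Ethernet */
	m->outer_l3_len = 20;       /* outer IPv4 */
	m->l2_len = 8 + 8 + 14;     /* UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;             /* inner IPv4 */
}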
if (unlikely(nb_ctx)) {
/* Setup TX context descriptor if required */
/* Allocate the rx queue data structure */
rxq = rte_zmalloc_socket("i40e rx queue",
sizeof(struct i40e_rx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
rxq->sw_ring =
rte_zmalloc_socket("i40e rx sw ring",
sizeof(struct i40e_rx_entry) * len,
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
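
The remaining hunks only follow the EAL rename of CACHE_LINE_SIZE to RTE_CACHE_LINE_SIZE; the rte_zmalloc_socket() call shape is unchanged. A minimal sketch of the allocation pattern, with a hypothetical struct name:

#include <rte_malloc.h>
#include <rte_lcore.h>

struct my_queue { int dummy; };  /* hypothetical */

/* Zeroed, cache-line-aligned allocation on the caller's NUMA socket. */
static struct my_queue *
alloc_queue(void)
{
	return rte_zmalloc_socket("my queue", sizeof(struct my_queue),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
}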
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e tx queue",
sizeof(struct i40e_tx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
txq->sw_ring =
rte_zmalloc_socket("i40e tx sw ring",
sizeof(struct i40e_tx_entry) * nb_desc,
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
sizeof(struct i40e_tx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
sizeof(struct i40e_rx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "