/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_TX_CKSUM_OFFLOAD_MASK ( \
+ PKT_TX_IP_CKSUM | \
+ PKT_TX_L4_MASK | \
+ PKT_TX_OUTER_IP_CKSUM)
+
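/*
 * [Editor's sketch, not part of the patch] How an application would
 * typically request the offloads covered by I40E_TX_CKSUM_OFFLOAD_MASK
 * on a TX mbuf. Field values are illustrative only; assumes
 * <rte_mbuf.h>, <rte_ether.h> and <rte_ip.h>.
 */
static inline void
example_request_cksum_offload(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct ether_hdr);	/* 14-byte Ethernet header */
	m->l3_len = sizeof(struct ipv4_hdr);	/* 20-byte IPv4 header */
	/* PKT_TX_UDP_CKSUM is one of the values covered by PKT_TX_L4_MASK */
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_UDP_CKSUM;
}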
#define RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mb) \
(uint64_t) ((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
i40e_rxd_build_fdir(volatile union i40e_rx_desc *rxdp, struct rte_mbuf *mb)
{
uint64_t flags = 0;
+#ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
uint16_t flexbh, flexbl;
-#ifdef RTE_LIBRTE_I40E_16BYTE_RX_DESC
- mb->hash.fdir.hi =
-		rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
- flags |= PKT_RX_FDIR_ID;
-#else
flexbh = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT) &
I40E_RX_DESC_EXT_STATUS_FLEXBH_MASK;
	flexbl = (rte_le_to_cpu_32(rxdp->wb.qword2.ext_status) >>
		I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT) &
		I40E_RX_DESC_EXT_STATUS_FLEXBL_MASK;

	if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FD_ID) {
		mb->hash.fdir.hi =
			rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.fd_id);
		flags |= PKT_RX_FDIR_ID;
	} else if (flexbh == I40E_RX_DESC_EXT_STATUS_FLEXBH_FLEX) {
		mb->hash.fdir.hi =
			rte_le_to_cpu_32(rxdp->wb.qword3.hi_dword.flex_bytes_hi);
		flags |= PKT_RX_FDIR_FLX;
	}
	if (flexbl == I40E_RX_DESC_EXT_STATUS_FLEXBL_FLEX) {
		mb->hash.fdir.lo =
			rte_le_to_cpu_32(rxdp->wb.qword3.lo_dword.flex_bytes_lo);
flags |= PKT_RX_FDIR_FLX;
}
+#else
+ mb->hash.fdir.hi =
+ rte_le_to_cpu_32(rxdp->wb.qword0.hi_dword.fd_id);
+ flags |= PKT_RX_FDIR_ID;
#endif
return flags;
}
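/*
 * [Editor's sketch, not part of the patch] Shape of the i40e_tx_offload
 * union consumed below; it mirrors the bit-field layout of the mbuf
 * tx_offload word so all header lengths can be fetched in one 64-bit
 * read. The exact field widths here are assumptions for illustration;
 * the real definition lives in i40e_rxtx.h.
 */
union example_tx_offload {
	uint64_t data;
	struct {
		uint64_t l2_len:7;		/* L2 (MAC) header length */
		uint64_t l3_len:9;		/* L3 (IP) header length */
		uint64_t l4_len:8;		/* L4 header length */
		uint64_t tso_segsz:16;		/* TCP TSO segment size */
		uint64_t outer_l2_len:8;	/* outer L2 header length */
		uint64_t outer_l3_len:16;	/* outer L3 header length */
	};
};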
i40e_txd_enable_checksum(uint64_t ol_flags,
uint32_t *td_cmd,
uint32_t *td_offset,
- uint8_t l2_len,
- uint16_t l3_len,
- uint8_t inner_l2_len,
- uint16_t inner_l3_len,
+ union i40e_tx_offload tx_offload,
uint32_t *cd_tunneling)
{
- if (!l2_len) {
- PMD_DRV_LOG(DEBUG, "L2 length set to 0");
- return;
- }
- *td_offset |= (l2_len >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+ /* UDP tunneling packet TX checksum offload */
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM) {
- if (!l3_len) {
- PMD_DRV_LOG(DEBUG, "L3 length set to 0");
- return;
- }
-
- /* VXLAN packet TX checksum offload */
- if (unlikely(ol_flags & PKT_TX_VXLAN_CKSUM)) {
- uint8_t l4tun_len;
-
- l4tun_len = ETHER_VXLAN_HLEN + inner_l2_len;
+ *td_offset |= (tx_offload.outer_l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
- if (ol_flags & PKT_TX_IPV4_CSUM)
+ if (ol_flags & PKT_TX_OUTER_IP_CKSUM)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
- else if (ol_flags & PKT_TX_IPV6)
+ else if (ol_flags & PKT_TX_OUTER_IPV4)
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
+ else if (ol_flags & PKT_TX_OUTER_IPV6)
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
/* Now set the ctx descriptor fields */
- *cd_tunneling |= (l3_len >> 2) <<
+ *cd_tunneling |= (tx_offload.outer_l3_len >> 2) <<
I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT |
- I40E_TXD_CTX_UDP_TUNNELING |
- (l4tun_len >> 1) <<
+ (tx_offload.l2_len >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
- l3_len = inner_l3_len;
- }
+ } else
+ *td_offset |= (tx_offload.l2_len >> 1)
+ << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
/* Enable L3 checksum offloads */
- if (ol_flags & PKT_TX_IPV4_CSUM) {
+ if (ol_flags & PKT_TX_IP_CKSUM) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV4) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
} else if (ol_flags & PKT_TX_IPV6) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
- *td_offset |= (l3_len >> 2) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ *td_offset |= (tx_offload.l3_len >> 2)
+ << I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
+ }
+
+ if (ol_flags & PKT_TX_TCP_SEG) {
+ *td_cmd |= I40E_TX_DESC_CMD_L4T_EOFT_TCP;
+ *td_offset |= (tx_offload.l4_len >> 2)
+ << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+ return;
}
/* Enable L4 checksum offloads */
"rxq->nb_rx_desc=%d",
rxq->rx_free_thresh, rxq->nb_rx_desc);
ret = -EINVAL;
- } else if (!(rxq->nb_rx_desc % rxq->rx_free_thresh) == 0) {
+ } else if (rxq->nb_rx_desc % rxq->rx_free_thresh != 0) {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions: "
"rxq->nb_rx_desc=%d, "
"rxq->rx_free_thresh=%d",
{
uint64_t mask = 0ULL;
- if (flags | PKT_TX_VXLAN_CKSUM)
- mask |= PKT_TX_VXLAN_CKSUM;
+ mask |= (PKT_TX_OUTER_IP_CKSUM | PKT_TX_TCP_SEG);
#ifdef RTE_LIBRTE_IEEE1588
mask |= PKT_TX_IEEE1588_TMST;
return 0;
}
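/*
 * [Editor's sketch, not part of the patch] With the mask above,
 * i40e_calc_context_desc() returns 1 whenever a context descriptor is
 * needed (outer IP checksum, TSO, or 1588 timestamping). E.g. a TSO
 * mbuf chained over three data segments consumes 3 + 1 = 4 ring slots:
 *
 *	nb_ctx = i40e_calc_context_desc(tx_pkt->ol_flags);	// -> 1
 *	nb_used = (uint16_t)(tx_pkt->nb_segs + nb_ctx);		// -> 4
 */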
+/* set i40e TSO context descriptor */
+static inline uint64_t
+i40e_set_tso_ctx(struct rte_mbuf *mbuf, union i40e_tx_offload tx_offload)
+{
+ uint64_t ctx_desc = 0;
+ uint32_t cd_cmd, hdr_len, cd_tso_len;
+
+ if (!tx_offload.l4_len) {
+ PMD_DRV_LOG(DEBUG, "L4 length set to 0");
+ return ctx_desc;
+ }
+
+ /**
+	 * in case of a non-tunneling packet, the outer_l2_len and
+ * outer_l3_len must be 0.
+ */
+ hdr_len = tx_offload.outer_l2_len +
+ tx_offload.outer_l3_len +
+ tx_offload.l2_len +
+ tx_offload.l3_len +
+ tx_offload.l4_len;
+
+ cd_cmd = I40E_TX_CTX_DESC_TSO;
+ cd_tso_len = mbuf->pkt_len - hdr_len;
+ ctx_desc |= ((uint64_t)cd_cmd << I40E_TXD_CTX_QW1_CMD_SHIFT) |
+ ((uint64_t)cd_tso_len <<
+ I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+ ((uint64_t)mbuf->tso_segsz <<
+ I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+ return ctx_desc;
+}
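/*
 * [Editor's sketch, not part of the patch] The mbuf fields an
 * application must fill so i40e_set_tso_ctx() can compute hdr_len and
 * the TSO payload length. Values are for an untunneled TCP/IPv4 packet;
 * assumes <rte_mbuf.h>, <rte_ether.h>, <rte_ip.h> and <rte_tcp.h>.
 */
static inline void
example_request_tso(struct rte_mbuf *m)
{
	m->ol_flags |= PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_SEG;
	m->l2_len = sizeof(struct ether_hdr);	/* 14 */
	m->l3_len = sizeof(struct ipv4_hdr);	/* 20 */
	m->l4_len = sizeof(struct tcp_hdr);	/* 20, no TCP options */
	m->outer_l2_len = 0;			/* not tunneled */
	m->outer_l3_len = 0;
	m->tso_segsz = 1460;			/* MSS per output segment */
	/* hdr_len above then evaluates to 0 + 0 + 14 + 20 + 20 = 54 */
}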
+
uint16_t
i40e_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
uint32_t tx_flags;
uint32_t td_tag;
uint64_t ol_flags;
- uint8_t l2_len;
- uint16_t l3_len;
- uint8_t inner_l2_len;
- uint16_t inner_l3_len;
uint16_t nb_used;
uint16_t nb_ctx;
uint16_t tx_last;
uint16_t slen;
uint64_t buf_dma_addr;
+ union i40e_tx_offload tx_offload = {0};
txq = tx_queue;
sw_ring = txq->sw_ring;
RTE_MBUF_PREFETCH_TO_FREE(txe->mbuf);
ol_flags = tx_pkt->ol_flags;
- l2_len = tx_pkt->l2_len;
- inner_l2_len = tx_pkt->inner_l2_len;
- l3_len = tx_pkt->l3_len;
- inner_l3_len = tx_pkt->inner_l3_len;
+ tx_offload.l2_len = tx_pkt->l2_len;
+ tx_offload.l3_len = tx_pkt->l3_len;
+ tx_offload.outer_l2_len = tx_pkt->outer_l2_len;
+ tx_offload.outer_l3_len = tx_pkt->outer_l3_len;
+ tx_offload.l4_len = tx_pkt->l4_len;
+ tx_offload.tso_segsz = tx_pkt->tso_segsz;
/* Calculate the number of context descriptors needed. */
nb_ctx = i40e_calc_context_desc(ol_flags);
/* Enable checksum offloading */
cd_tunneling_params = 0;
- i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
- l2_len, l3_len, inner_l2_len,
- inner_l3_len,
- &cd_tunneling_params);
+ if (unlikely(ol_flags & I40E_TX_CKSUM_OFFLOAD_MASK)) {
+ i40e_txd_enable_checksum(ol_flags, &td_cmd, &td_offset,
+ tx_offload, &cd_tunneling_params);
+ }
if (unlikely(nb_ctx)) {
/* Setup TX context descriptor if required */
rte_pktmbuf_free_seg(txe->mbuf);
txe->mbuf = NULL;
}
-#ifdef RTE_LIBRTE_IEEE1588
- if (ol_flags & PKT_TX_IEEE1588_TMST)
+
+ /* TSO enabled means no timestamp */
+ if (ol_flags & PKT_TX_TCP_SEG)
cd_type_cmd_tso_mss |=
- ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
- I40E_TXD_CTX_QW1_CMD_SHIFT);
+ i40e_set_tso_ctx(tx_pkt, tx_offload);
+ else {
+#ifdef RTE_LIBRTE_IEEE1588
+ if (ol_flags & PKT_TX_IEEE1588_TMST)
+ cd_type_cmd_tso_mss |=
+ ((uint64_t)I40E_TX_CTX_DESC_TSYN <<
+ I40E_TXD_CTX_QW1_CMD_SHIFT);
#endif
+ }
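	/*
	 * [Editor's note] Per the I40E_TXD_CTX_QW1_* shifts in
	 * i40e_type.h, cd_type_cmd_tso_mss packs DTYPE in bits [3:0],
	 * CMD from bit 4, TSO_LEN from bit 30 and MSS from bit 50, which
	 * is what i40e_set_tso_ctx() above assembles; TSYN and TSO both
	 * live in the CMD field, hence the if/else.
	 */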
+
ctx_txd->tunneling_params =
rte_cpu_to_le_32(cd_tunneling_params);
ctx_txd->l2tag2 = rte_cpu_to_le_16(cd_l2tag2);
ctx_txd->type_cmd_tso_mss =
rte_cpu_to_le_64(cd_type_cmd_tso_mss);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TCD[%u]:\n"
+ "tunneling_params: %#x;\n"
+ "l2tag2: %#hx;\n"
+ "rsvd: %#hx;\n"
+ "type_cmd_tso_mss: %#"PRIx64";\n",
+ tx_pkt, tx_id,
+ ctx_txd->tunneling_params,
+ ctx_txd->l2tag2,
+ ctx_txd->rsvd,
+ ctx_txd->type_cmd_tso_mss);
+
txe->last_id = tx_last;
tx_id = txe->next_id;
txe = txn;
/* Setup TX Descriptor */
slen = m_seg->data_len;
buf_dma_addr = RTE_MBUF_DATA_DMA_ADDR(m_seg);
+
+ PMD_TX_LOG(DEBUG, "mbuf: %p, TDD[%u]:\n"
+ "buf_dma_addr: %#"PRIx64";\n"
+ "td_cmd: %#x;\n"
+ "td_offset: %#x;\n"
+ "td_len: %u;\n"
+ "td_tag: %#x;\n",
+ tx_pkt, tx_id, buf_dma_addr,
+ td_cmd, td_offset, slen, td_tag);
+
txd->buffer_addr = rte_cpu_to_le_64(buf_dma_addr);
txd->cmd_type_offset_bsz = i40e_build_ctob(td_cmd,
td_offset, slen, td_tag);
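/*
 * [Editor's sketch, not part of the patch] i40e_build_ctob() packs the
 * per-descriptor fields into the second qword of the data descriptor;
 * it amounts to the following (shift names from i40e_type.h):
 */
static inline uint64_t
example_build_ctob(uint32_t td_cmd, uint32_t td_offset,
		   uint16_t size, uint32_t td_tag)
{
	return rte_cpu_to_le_64(I40E_TX_DESC_DTYPE_DATA |
			((uint64_t)td_cmd << I40E_TXD_QW1_CMD_SHIFT) |
			((uint64_t)td_offset << I40E_TXD_QW1_OFFSET_SHIFT) |
			((uint64_t)size << I40E_TXD_QW1_TX_BUF_SZ_SHIFT) |
			((uint64_t)td_tag << I40E_TXD_QW1_L2TAG1_SHIFT));
}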
/* Allocate the rx queue data structure */
rxq = rte_zmalloc_socket("i40e rx queue",
sizeof(struct i40e_rx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
rxq->sw_ring =
rte_zmalloc_socket("i40e rx sw ring",
sizeof(struct i40e_rx_entry) * len,
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!rxq->sw_ring) {
i40e_dev_rx_queue_release(rxq);
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e tx queue",
sizeof(struct i40e_tx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
txq->sw_ring =
rte_zmalloc_socket("i40e tx sw ring",
sizeof(struct i40e_tx_entry) * nb_desc,
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
socket_id);
if (!txq->sw_ring) {
i40e_dev_tx_queue_release(txq);
struct i40e_pf *pf = I40E_VSI_TO_PF(rxq->vsi);
struct i40e_hw *hw = I40E_VSI_TO_HW(rxq->vsi);
struct rte_eth_dev_data *data = pf->dev_data;
- struct rte_pktmbuf_pool_private *mbp_priv =
- rte_mempool_get_priv(rxq->mp);
- uint16_t buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
- uint16_t len;
+ uint16_t buf_size, len;
+
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
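	/*
	 * [Editor's sketch, not part of the patch]
	 * rte_pktmbuf_data_room_size() reports the value the pool was
	 * created with, so the old direct poke at the mempool private
	 * area is no longer needed. E.g.:
	 *
	 *	struct rte_mempool *mp = rte_pktmbuf_pool_create("rx_pool",
	 *		8192, 256, 0, RTE_MBUF_DEFAULT_BUF_SIZE,
	 *		rte_socket_id());
	 *	// data room = 2048 + RTE_PKTMBUF_HEADROOM,
	 *	// so buf_size above comes out as 2048
	 */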
switch (pf->flags & (I40E_FLAG_HEADER_SPLIT_DISABLED |
I40E_FLAG_HEADER_SPLIT_ENABLED)) {
uint16_t pf_q = rxq->reg_idx;
uint16_t buf_size;
struct i40e_hmc_obj_rxq rx_ctx;
- struct rte_pktmbuf_pool_private *mbp_priv;
err = i40e_rx_queue_config(rxq);
if (err < 0) {
rxq->qrx_tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
- mbp_priv = rte_mempool_get_priv(rxq->mp);
- buf_size = (uint16_t)(mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+ RTE_PKTMBUF_HEADROOM);
/* Check if scattered RX needs to be used. */
if ((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
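		/*
		 * [Editor's note, worked example] With the default
		 * 2048-byte data room computed above, a 9000-byte
		 * max_pkt_len forces scattered RX (9000 + 2 * 4 > 2048),
		 * while a standard 1518-byte frame fits in a single mbuf.
		 */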
/* Allocate the TX queue data structure. */
txq = rte_zmalloc_socket("i40e fdir tx queue",
sizeof(struct i40e_tx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
/* Allocate the RX queue data structure. */
rxq = rte_zmalloc_socket("i40e fdir rx queue",
sizeof(struct i40e_rx_queue),
- CACHE_LINE_SIZE,
+ RTE_CACHE_LINE_SIZE,
SOCKET_ID_ANY);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "