diff --git a/drivers/net/iavf/iavf_rxtx_vec_common.h b/drivers/net/iavf/iavf_rxtx_vec_common.h
index 816e16a937..a59cb2ceee 100644
--- a/drivers/net/iavf/iavf_rxtx_vec_common.h
+++ b/drivers/net/iavf/iavf_rxtx_vec_common.h
@@ -15,7 +15,7 @@
 #pragma GCC diagnostic ignored "-Wcast-qual"
 #endif
 
-static inline uint16_t
+static __rte_always_inline uint16_t
 reassemble_packets(struct iavf_rx_queue *rxq, struct rte_mbuf **rx_bufs,
 		   uint16_t nb_bufs, uint8_t *split_flags)
 {
@@ -231,7 +231,13 @@ iavf_rx_vec_queue_default(struct iavf_rx_queue *rxq)
 	if (rxq->proto_xtr != IAVF_PROTO_XTR_NONE)
 		return -1;
 
-	return 0;
+	if (rxq->offloads & RTE_ETH_RX_OFFLOAD_TIMESTAMP)
+		return -1;
+
+	if (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD)
+		return IAVF_VECTOR_OFFLOAD_PATH;
+
+	return IAVF_VECTOR_PATH;
 }
 
 static inline int
@@ -240,14 +246,17 @@ iavf_tx_vec_queue_default(struct iavf_tx_queue *txq)
 	if (!txq)
 		return -1;
 
-	if (txq->offloads & IAVF_NO_VECTOR_FLAGS)
-		return -1;
-
 	if (txq->rs_thresh < IAVF_VPMD_TX_MAX_BURST ||
 	    txq->rs_thresh > IAVF_VPMD_TX_MAX_FREE_BUF)
 		return -1;
 
-	return 0;
+	if (txq->offloads & IAVF_TX_NO_VECTOR_FLAGS)
+		return -1;
+
+	if (txq->offloads & IAVF_TX_VECTOR_OFFLOAD)
+		return IAVF_VECTOR_OFFLOAD_PATH;
+
+	return IAVF_VECTOR_PATH;
 }
 
 static inline int
@@ -255,14 +264,20 @@ iavf_rx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct iavf_rx_queue *rxq;
+	int ret;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		rxq = dev->data->rx_queues[i];
-		if (iavf_rx_vec_queue_default(rxq))
+		ret = iavf_rx_vec_queue_default(rxq);
+
+		if (ret < 0)
 			return -1;
+		if (ret > result)
+			result = ret;
 	}
 
-	return 0;
+	return result;
 }
 
 static inline int
@@ -270,14 +285,97 @@ iavf_tx_vec_dev_check_default(struct rte_eth_dev *dev)
 {
 	int i;
 	struct iavf_tx_queue *txq;
+	int ret;
+	int result = 0;
 
 	for (i = 0; i < dev->data->nb_tx_queues; i++) {
 		txq = dev->data->tx_queues[i];
-		if (iavf_tx_vec_queue_default(txq))
+		ret = iavf_tx_vec_queue_default(txq);
+
+		if (ret < 0)
 			return -1;
+		if (ret > result)
+			result = ret;
 	}
 
-	return 0;
+	return result;
+}
+
+/******************************************************************************
+ * If the user knows that a specific offload is not enabled by the
+ * application, the macro can be commented out to skip that work in
+ * the fast path. Two features are currently supported in the TX path:
+ * 1. checksum offload
+ * 2. VLAN/QINQ insertion
+ ******************************************************************************/
+#define IAVF_TX_CSUM_OFFLOAD
+#define IAVF_TX_VLAN_QINQ_OFFLOAD
+
+static __rte_always_inline void
+iavf_txd_enable_offload(__rte_unused struct rte_mbuf *tx_pkt,
+			uint64_t *txd_hi)
+{
+#if defined(IAVF_TX_CSUM_OFFLOAD) || defined(IAVF_TX_VLAN_QINQ_OFFLOAD)
+	uint64_t ol_flags = tx_pkt->ol_flags;
+#endif
+	uint32_t td_cmd = 0;
+#ifdef IAVF_TX_CSUM_OFFLOAD
+	uint32_t td_offset = 0;
+#endif
+
+#ifdef IAVF_TX_CSUM_OFFLOAD
+	/* Set MACLEN */
+	td_offset |= (tx_pkt->l2_len >> 1) <<
+		     IAVF_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+	/* Enable L3 checksum offloads */
+	if (ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
+		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV4) {
+		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV4;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	} else if (ol_flags & RTE_MBUF_F_TX_IPV6) {
+		td_cmd |= IAVF_TX_DESC_CMD_IIPT_IPV6;
+		td_offset |= (tx_pkt->l3_len >> 2) <<
+			     IAVF_TX_DESC_LENGTH_IPLEN_SHIFT;
+	}
+
+	/* Enable L4 checksum offloads */
+	switch (ol_flags & RTE_MBUF_F_TX_L4_MASK) {
+	case RTE_MBUF_F_TX_TCP_CKSUM:
+		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP;
+		td_offset |= (sizeof(struct rte_tcp_hdr) >> 2) <<
+			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case RTE_MBUF_F_TX_SCTP_CKSUM:
+		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_SCTP;
+		td_offset |= (sizeof(struct rte_sctp_hdr) >> 2) <<
+			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	case RTE_MBUF_F_TX_UDP_CKSUM:
+		td_cmd |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP;
+		td_offset |= (sizeof(struct rte_udp_hdr) >> 2) <<
+			     IAVF_TX_DESC_LENGTH_L4_FC_LEN_SHIFT;
+		break;
+	default:
+		break;
+	}
+
+	*txd_hi |= ((uint64_t)td_offset) << IAVF_TXD_QW1_OFFSET_SHIFT;
+#endif
+
+#ifdef IAVF_TX_VLAN_QINQ_OFFLOAD
+	if (ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
+		td_cmd |= IAVF_TX_DESC_CMD_IL2TAG1;
+		*txd_hi |= ((uint64_t)tx_pkt->vlan_tci <<
+			    IAVF_TXD_QW1_L2TAG1_SHIFT);
+	}
+#endif
+
+	*txd_hi |= ((uint64_t)td_cmd) << IAVF_TXD_QW1_CMD_SHIFT;
 }
 
 #ifdef CC_AVX2_SUPPORT
@@ -475,7 +573,7 @@ iavf_rxq_rearm_common(struct iavf_rx_queue *rxq, __rte_unused bool avx512)
 			     (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
 
 	/* Update the tail pointer on the NIC */
-	IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+	IAVF_PCI_REG_WC_WRITE(rxq->qrx_tail, rx_id);
 }
 #endif
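
For illustration only (not part of the patch): the sketch below shows how an application might mark an mbuf so that iavf_txd_enable_offload() programs IP and TCP checksum offload into the descriptor. The helper name prepare_ipv4_tcp_csum() is invented for this example; it assumes the port was configured with RTE_ETH_TX_OFFLOAD_IPV4_CKSUM and RTE_ETH_TX_OFFLOAD_TCP_CKSUM, bits which IAVF_TX_VECTOR_OFFLOAD is expected to cover so that iavf_tx_vec_queue_default() returns IAVF_VECTOR_OFFLOAD_PATH.

#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_mbuf.h>

/* Invented example helper: request L3/L4 checksum offload on an
 * IPv4/TCP packet. iavf_txd_enable_offload() derives MACLEN from
 * l2_len (in 2-byte units) and IPLEN from l3_len (in 4-byte units),
 * so both fields must describe the real header sizes. */
static void
prepare_ipv4_tcp_csum(struct rte_mbuf *m)
{
	m->l2_len = sizeof(struct rte_ether_hdr);   /* 14 B -> MACLEN 7 */
	m->l3_len = sizeof(struct rte_ipv4_hdr);    /* 20 B -> IPLEN 5 */
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 |         /* IPv4 framing */
		       RTE_MBUF_F_TX_IP_CKSUM |     /* HW IP checksum */
		       RTE_MBUF_F_TX_TCP_CKSUM;     /* HW TCP checksum */
}

With these fields set, the branches under IAVF_TX_CSUM_OFFLOAD above fill td_cmd and td_offset; packets without the flags fall through with no checksum commands set in the descriptor.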