ethdev: add namespace
[dpdk.git] / drivers/net/iavf/iavf_rxtx.h
index 59625a9..2d7f6b1 100644
 #define IAVF_VPMD_DESCS_PER_LOOP  4
 #define IAVF_VPMD_TX_MAX_FREE_BUF 64
 
-#define IAVF_NO_VECTOR_FLAGS (                          \
-               DEV_TX_OFFLOAD_MULTI_SEGS |              \
-               DEV_TX_OFFLOAD_VLAN_INSERT |             \
-               DEV_TX_OFFLOAD_SCTP_CKSUM |              \
-               DEV_TX_OFFLOAD_UDP_CKSUM |               \
-               DEV_TX_OFFLOAD_TCP_TSO |                 \
-               DEV_TX_OFFLOAD_TCP_CKSUM)
+#define IAVF_TX_NO_VECTOR_FLAGS (                               \
+               RTE_ETH_TX_OFFLOAD_MULTI_SEGS |          \
+               RTE_ETH_TX_OFFLOAD_TCP_TSO)
+
+#define IAVF_TX_VECTOR_OFFLOAD (                                \
+               RTE_ETH_TX_OFFLOAD_VLAN_INSERT |                 \
+               RTE_ETH_TX_OFFLOAD_QINQ_INSERT |                 \
+               RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |          \
+               RTE_ETH_TX_OFFLOAD_SCTP_CKSUM |          \
+               RTE_ETH_TX_OFFLOAD_UDP_CKSUM |           \
+               RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
+
+#define IAVF_RX_VECTOR_OFFLOAD (                                \
+               RTE_ETH_RX_OFFLOAD_CHECKSUM |            \
+               RTE_ETH_RX_OFFLOAD_SCTP_CKSUM |          \
+               RTE_ETH_RX_OFFLOAD_VLAN |                \
+               RTE_ETH_RX_OFFLOAD_RSS_HASH)
+
+#define IAVF_VECTOR_PATH 0
+#define IAVF_VECTOR_OFFLOAD_PATH 1
 
 #define DEFAULT_TX_RS_THRESH     32
 #define DEFAULT_TX_FREE_THRESH   32
 
-#define IAVF_MIN_TSO_MSS          256
+#define IAVF_MIN_TSO_MSS          88
 #define IAVF_MAX_TSO_MSS          9668
 #define IAVF_TSO_MAX_SEG          UINT8_MAX
 #define IAVF_TX_MAX_MTU_SEG       8
 #define IAVF_TX_OFFLOAD_NOTSUP_MASK \
                (PKT_TX_OFFLOAD_MASK ^ IAVF_TX_OFFLOAD_MASK)
 
+/**
+ * Rx Flex Descriptors
+ * These descriptors are used instead of the legacy version descriptors
+ */
+union iavf_16b_rx_flex_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+                                /* bit 0 of hdr_addr is DD bit */
+       } read;
+       struct {
+               /* Qword 0 */
+               u8 rxdid; /* descriptor builder profile ID */
+               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+               __le16 pkt_len; /* [15:14] are reserved */
+               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+                                               /* sph=[11:11] */
+                                               /* ff1/ext=[15:12] */
+
+               /* Qword 1 */
+               __le16 status_error0;
+               __le16 l2tag1;
+               __le16 flex_meta0;
+               __le16 flex_meta1;
+       } wb; /* writeback */
+};
+
+union iavf_32b_rx_flex_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+                                /* bit 0 of hdr_addr is DD bit */
+               __le64 rsvd1;
+               __le64 rsvd2;
+       } read;
+       struct {
+               /* Qword 0 */
+               u8 rxdid; /* descriptor builder profile ID */
+               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+               __le16 pkt_len; /* [15:14] are reserved */
+               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+                                               /* sph=[11:11] */
+                                               /* ff1/ext=[15:12] */
+
+               /* Qword 1 */
+               __le16 status_error0;
+               __le16 l2tag1;
+               __le16 flex_meta0;
+               __le16 flex_meta1;
+
+               /* Qword 2 */
+               __le16 status_error1;
+               u8 flex_flags2;
+               u8 time_stamp_low;
+               __le16 l2tag2_1st;
+               __le16 l2tag2_2nd;
+
+               /* Qword 3 */
+               __le16 flex_meta2;
+               __le16 flex_meta3;
+               union {
+                       struct {
+                               __le16 flex_meta4;
+                               __le16 flex_meta5;
+                       } flex;
+                       __le32 ts_high;
+               } flex_ts;
+       } wb; /* writeback */
+};
+
 /* HW desc structure, both 16-byte and 32-byte types are supported */
 #ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 #define iavf_rx_desc iavf_16byte_rx_desc
 #define iavf_rx_flex_desc iavf_32b_rx_flex_desc
 #endif
 
+typedef void (*iavf_rxd_to_pkt_fields_t)(struct iavf_rx_queue *rxq,
+                               struct rte_mbuf *mb,
+                               volatile union iavf_rx_flex_desc *rxdp);
+
 struct iavf_rxq_ops {
        void (*release_mbufs)(struct iavf_rx_queue *rxq);
 };
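
The split of the old IAVF_NO_VECTOR_FLAGS into IAVF_TX_NO_VECTOR_FLAGS plus
IAVF_TX_VECTOR_OFFLOAD, together with the IAVF_VECTOR_PATH and
IAVF_VECTOR_OFFLOAD_PATH constants above, implies a two-stage Tx path choice:
offloads the vector Tx cannot handle at all force the scalar path, while the
remaining checksum/VLAN offloads only steer the queue to the offload-capable
vector variant. A minimal sketch of that decision (helper name and return
convention are illustrative, not taken from this patch):

	/* Hedged sketch: illustrative helper, not part of this patch. */
	static inline int
	iavf_tx_select_path(uint64_t offloads)
	{
		if (offloads & IAVF_TX_NO_VECTOR_FLAGS)
			return -1;			 /* fall back to scalar Tx */
		if (offloads & IAVF_TX_VECTOR_OFFLOAD)
			return IAVF_VECTOR_OFFLOAD_PATH; /* offload-aware vector Tx */
		return IAVF_VECTOR_PATH;		 /* basic vector Tx */
	}
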
@@ -114,6 +203,15 @@ struct iavf_rx_queue {
        bool q_set;             /* if rx queue has been configured */
        bool rx_deferred_start; /* don't start this queue in dev start */
        const struct iavf_rxq_ops *ops;
+       uint8_t rx_flags;
+#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG1     BIT(0)
+#define IAVF_RX_FLAGS_VLAN_TAG_LOC_L2TAG2_2   BIT(1)
+       uint8_t proto_xtr; /* protocol extraction type */
+       uint64_t xtr_ol_flag;
+               /* flexible descriptor metadata extraction offload flag */
+       iavf_rxd_to_pkt_fields_t rxd_to_pkt_fields;
+                               /* handle flexible descriptor by RXDID */
+       uint64_t offloads;
 };
 
 struct iavf_tx_entry {
@@ -122,6 +220,10 @@ struct iavf_tx_entry {
        uint16_t last_id;
 };
 
+struct iavf_tx_vec_entry {
+       struct rte_mbuf *mbuf;
+};
+
 /* Structure associated with each TX queue. */
 struct iavf_tx_queue {
        const struct rte_memzone *mz;  /* memzone for Tx ring */
@@ -147,6 +249,10 @@ struct iavf_tx_queue {
        bool q_set;                    /* if rx queue has been configured */
        bool tx_deferred_start;        /* don't start this queue in dev start */
        const struct iavf_txq_ops *ops;
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG1      BIT(0)
+#define IAVF_TX_FLAGS_VLAN_TAG_LOC_L2TAG2      BIT(1)
+       uint8_t vlan_flag;
+       uint8_t tc;
 };
 
 /* Offload features */
@@ -161,77 +267,6 @@ union iavf_tx_offload {
        };
 };
 
-/* Rx Flex Descriptors
- * These descriptors are used instead of the legacy version descriptors
- */
-union iavf_16b_rx_flex_desc {
-       struct {
-               __le64 pkt_addr; /* Packet buffer address */
-               __le64 hdr_addr; /* Header buffer address */
-                                /* bit 0 of hdr_addr is DD bit */
-       } read;
-       struct {
-               /* Qword 0 */
-               u8 rxdid; /* descriptor builder profile ID */
-               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
-               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
-               __le16 pkt_len; /* [15:14] are reserved */
-               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
-                                               /* sph=[11:11] */
-                                               /* ff1/ext=[15:12] */
-
-               /* Qword 1 */
-               __le16 status_error0;
-               __le16 l2tag1;
-               __le16 flex_meta0;
-               __le16 flex_meta1;
-       } wb; /* writeback */
-};
-
-union iavf_32b_rx_flex_desc {
-       struct {
-               __le64 pkt_addr; /* Packet buffer address */
-               __le64 hdr_addr; /* Header buffer address */
-                                /* bit 0 of hdr_addr is DD bit */
-               __le64 rsvd1;
-               __le64 rsvd2;
-       } read;
-       struct {
-               /* Qword 0 */
-               u8 rxdid; /* descriptor builder profile ID */
-               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
-               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
-               __le16 pkt_len; /* [15:14] are reserved */
-               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
-                                               /* sph=[11:11] */
-                                               /* ff1/ext=[15:12] */
-
-               /* Qword 1 */
-               __le16 status_error0;
-               __le16 l2tag1;
-               __le16 flex_meta0;
-               __le16 flex_meta1;
-
-               /* Qword 2 */
-               __le16 status_error1;
-               u8 flex_flags2;
-               u8 time_stamp_low;
-               __le16 l2tag2_1st;
-               __le16 l2tag2_2nd;
-
-               /* Qword 3 */
-               __le16 flex_meta2;
-               __le16 flex_meta3;
-               union {
-                       struct {
-                               __le16 flex_meta4;
-                               __le16 flex_meta5;
-                       } flex;
-                       __le32 ts_high;
-               } flex_ts;
-       } wb; /* writeback */
-};
-
 /* Rx Flex Descriptor
  * RxDID Profile ID 16-21
  * Flex-field 0: RSS hash lower 16-bits
@@ -331,6 +366,7 @@ enum iavf_rxdid {
        IAVF_RXDID_COMMS_AUX_TCP        = 21,
        IAVF_RXDID_COMMS_OVS_1          = 22,
        IAVF_RXDID_COMMS_OVS_2          = 23,
+       IAVF_RXDID_COMMS_AUX_IP_OFFSET  = 25,
        IAVF_RXDID_LAST                 = 63,
 };
 
@@ -355,6 +391,20 @@ enum iavf_rx_flex_desc_status_error_0_bits {
        IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
 };
 
+enum iavf_rx_flex_desc_status_error_1_bits {
+       /* Note: These are predefined bit offsets */
+       IAVF_RX_FLEX_DESC_STATUS1_CPM_S = 0, /* 4 bits */
+       IAVF_RX_FLEX_DESC_STATUS1_NAT_S = 4,
+       IAVF_RX_FLEX_DESC_STATUS1_CRYPTO_S = 5,
+       /* [10:6] reserved */
+       IAVF_RX_FLEX_DESC_STATUS1_L2TAG2P_S = 11,
+       IAVF_RX_FLEX_DESC_STATUS1_XTRMD2_VALID_S = 12,
+       IAVF_RX_FLEX_DESC_STATUS1_XTRMD3_VALID_S = 13,
+       IAVF_RX_FLEX_DESC_STATUS1_XTRMD4_VALID_S = 14,
+       IAVF_RX_FLEX_DESC_STATUS1_XTRMD5_VALID_S = 15,
+       IAVF_RX_FLEX_DESC_STATUS1_LAST /* this entry must be last!!! */
+};
+
 /* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
 #define IAVF_RX_FLEX_DESC_PTYPE_M      (0x3FF) /* 10-bits */
 
@@ -370,7 +420,7 @@ int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
 int iavf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
 int iavf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
-void iavf_dev_rx_queue_release(void *rxq);
+void iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 
 int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
@@ -379,7 +429,8 @@ int iavf_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           const struct rte_eth_txconf *tx_conf);
 int iavf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
 int iavf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
-void iavf_dev_tx_queue_release(void *txq);
+int iavf_dev_tx_done_cleanup(void *txq, uint32_t free_cnt);
+void iavf_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
@@ -402,7 +453,7 @@ void iavf_dev_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_rxq_info *qinfo);
 void iavf_dev_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
                          struct rte_eth_txq_info *qinfo);
-uint32_t iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id);
+uint32_t iavf_dev_rxq_count(void *rx_queue);
 int iavf_dev_rx_desc_status(void *rx_queue, uint16_t offset);
 int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
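
iavf_dev_rx_queue_release() and iavf_dev_tx_queue_release() move from taking a
bare queue pointer to the (dev, qid) form of the ethdev queue-release callback,
so the driver now looks the queue up in dev->data itself. A minimal sketch of
the Rx side, assuming the mbuf cleanup goes through the per-queue ops shown
earlier and eliding the descriptor ring/memzone teardown:

	/* Hedged sketch: simplified body, not the full implementation. */
	void
	iavf_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
	{
		struct iavf_rx_queue *q = dev->data->rx_queues[qid];

		if (!q)
			return;

		q->ops->release_mbufs(q);	/* drop mbufs still held by the ring */
		rte_free(q);			/* ring memzone release elided */
	}
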
 
@@ -433,10 +484,42 @@ uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
                                 uint16_t nb_pkts);
+int iavf_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc);
 int iavf_rx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_tx_vec_dev_check(struct rte_eth_dev *dev);
 int iavf_rxq_vec_setup(struct iavf_rx_queue *rxq);
 int iavf_txq_vec_setup(struct iavf_tx_queue *txq);
+uint16_t iavf_recv_pkts_vec_avx512(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                  uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx512_offload(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx512_flex_rxd(void *rx_queue,
+                                           struct rte_mbuf **rx_pkts,
+                                           uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
+                                                   struct rte_mbuf **rx_pkts,
+                                                   uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512(void *rx_queue,
+                                            struct rte_mbuf **rx_pkts,
+                                            uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512_offload(void *rx_queue,
+                                                    struct rte_mbuf **rx_pkts,
+                                                    uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd(void *rx_queue,
+                                                     struct rte_mbuf **rx_pkts,
+                                                     uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx512_flex_rxd_offload(void *rx_queue,
+                                                             struct rte_mbuf **rx_pkts,
+                                                             uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts_vec_avx512(void *tx_queue, struct rte_mbuf **tx_pkts,
+                                  uint16_t nb_pkts);
+uint16_t iavf_xmit_pkts_vec_avx512_offload(void *tx_queue,
+                                          struct rte_mbuf **tx_pkts,
+                                          uint16_t nb_pkts);
+int iavf_txq_vec_setup_avx512(struct iavf_tx_queue *txq);
+
+uint8_t iavf_proto_xtr_type_to_rxdid(uint8_t xtr_type);
 
 const uint32_t *iavf_get_default_ptype_table(void);
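
The AVX512 prototypes come in plain, _offload, _flex_rxd and _flex_rxd_offload
flavours; which one a queue gets can be derived from the path constants and the
IAVF_RX_VECTOR_OFFLOAD mask defined at the top of the file. A hedged sketch of
that lookup (the helper name and the use_flex flag are illustrative, not from
this patch):

	/* Hedged sketch: illustrative burst-function lookup, not part of this patch. */
	static eth_rx_burst_t
	iavf_pick_avx512_rx_burst(struct iavf_rx_queue *rxq, bool use_flex)
	{
		int path = (rxq->offloads & IAVF_RX_VECTOR_OFFLOAD) ?
			   IAVF_VECTOR_OFFLOAD_PATH : IAVF_VECTOR_PATH;

		if (use_flex)
			return path == IAVF_VECTOR_OFFLOAD_PATH ?
			       iavf_recv_pkts_vec_avx512_flex_rxd_offload :
			       iavf_recv_pkts_vec_avx512_flex_rxd;

		return path == IAVF_VECTOR_OFFLOAD_PATH ?
		       iavf_recv_pkts_vec_avx512_offload :
		       iavf_recv_pkts_vec_avx512;
	}
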
 
@@ -494,8 +577,8 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
 
 #define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
        int i; \
-       for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
-               struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+       for (i = 0; i < (ad)->dev_data->nb_rx_queues; i++) { \
+               struct iavf_rx_queue *rxq = (ad)->dev_data->rx_queues[i]; \
                if (!rxq) \
                        continue; \
                rxq->fdir_enabled = on; \