diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 09b5bd9..59625a9 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
 /* HW desc structure, both 16-byte and 32-byte types are supported */
 #ifdef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
 #define iavf_rx_desc iavf_16byte_rx_desc
+#define iavf_rx_flex_desc iavf_16b_rx_flex_desc
 #else
 #define iavf_rx_desc iavf_32byte_rx_desc
+#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
 #endif
 
 struct iavf_rxq_ops {
@@ -87,6 +89,7 @@ struct iavf_rx_queue {
        struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
        struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
        struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+       uint8_t rxdid;          /* flexible Rx descriptor profile ID (RXDID) */
 
        /* used for VPMD */
        uint16_t rxrearm_nb;       /* number of remaining to be re-armed */
@@ -101,6 +104,7 @@ struct iavf_rx_queue {
 
        uint16_t port_id;        /* device port ID */
        uint8_t crc_len;        /* 0 if CRC stripped, 4 otherwise */
+       uint8_t fdir_enabled;   /* 0 if FDIR disabled, 1 when enabled */
        uint16_t queue_id;      /* Rx queue index */
        uint16_t rx_buf_len;    /* The packet buffer size */
        uint16_t rx_hdr_len;    /* The header buffer size */
@@ -157,6 +161,206 @@ union iavf_tx_offload {
        };
 };
 
+/* Rx Flex Descriptors
+ * These descriptors are used instead of the legacy version descriptors
+ */
+union iavf_16b_rx_flex_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+                                /* bit 0 of hdr_addr is DD bit */
+       } read;
+       struct {
+               /* Qword 0 */
+               u8 rxdid; /* descriptor builder profile ID */
+               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+               __le16 pkt_len; /* [15:14] are reserved */
+               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+                                               /* sph=[11:11] */
+                                               /* ff1/ext=[15:12] */
+
+               /* Qword 1 */
+               __le16 status_error0;
+               __le16 l2tag1;
+               __le16 flex_meta0;
+               __le16 flex_meta1;
+       } wb; /* writeback */
+};
+
+union iavf_32b_rx_flex_desc {
+       struct {
+               __le64 pkt_addr; /* Packet buffer address */
+               __le64 hdr_addr; /* Header buffer address */
+                                /* bit 0 of hdr_addr is DD bit */
+               __le64 rsvd1;
+               __le64 rsvd2;
+       } read;
+       struct {
+               /* Qword 0 */
+               u8 rxdid; /* descriptor builder profile ID */
+               u8 mir_id_umb_cast; /* mirror=[5:0], umb=[7:6] */
+               __le16 ptype_flex_flags0; /* ptype=[9:0], ff0=[15:10] */
+               __le16 pkt_len; /* [15:14] are reserved */
+               __le16 hdr_len_sph_flex_flags1; /* header=[10:0] */
+                                               /* sph=[11:11] */
+                                               /* ff1/ext=[15:12] */
+
+               /* Qword 1 */
+               __le16 status_error0;
+               __le16 l2tag1;
+               __le16 flex_meta0;
+               __le16 flex_meta1;
+
+               /* Qword 2 */
+               __le16 status_error1;
+               u8 flex_flags2;
+               u8 time_stamp_low;
+               __le16 l2tag2_1st;
+               __le16 l2tag2_2nd;
+
+               /* Qword 3 */
+               __le16 flex_meta2;
+               __le16 flex_meta3;
+               union {
+                       struct {
+                               __le16 flex_meta4;
+                               __le16 flex_meta5;
+                       } flex;
+                       __le32 ts_high;
+               } flex_ts;
+       } wb; /* writeback */
+};
+
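For illustration only (this helper is not part of the patch): a minimal sketch of how a ring entry could be re-armed through the read format, assuming rte_mbuf.h and rte_byteorder.h are visible at this point and rxdp points into the queue's flex descriptor ring. Clearing hdr_addr also clears the DD bit, because bit 0 of hdr_addr doubles as DD in the writeback format.

static inline void
iavf_flex_desc_rearm_sketch(volatile union iavf_rx_flex_desc *rxdp,
                            struct rte_mbuf *mbuf)
{
        /* Point the descriptor at the mbuf's data buffer. */
        rxdp->read.pkt_addr =
                rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
        /* Zeroing hdr_addr clears bit 0 (the DD bit), so the entry reads
         * as "not done" until hardware writes the descriptor back.
         */
        rxdp->read.hdr_addr = 0;
}
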
+/* Rx Flex Descriptor
+ * RxDID Profile ID 16-21
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Flow ID upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct iavf_32b_rx_flex_desc_comms {
+       /* Qword 0 */
+       u8 rxdid;
+       u8 mir_id_umb_cast;
+       __le16 ptype_flexi_flags0;
+       __le16 pkt_len;
+       __le16 hdr_len_sph_flex_flags1;
+
+       /* Qword 1 */
+       __le16 status_error0;
+       __le16 l2tag1;
+       __le32 rss_hash;
+
+       /* Qword 2 */
+       __le16 status_error1;
+       u8 flexi_flags2;
+       u8 ts_low;
+       __le16 l2tag2_1st;
+       __le16 l2tag2_2nd;
+
+       /* Qword 3 */
+       __le32 flow_id;
+       union {
+               struct {
+                       __le16 aux0;
+                       __le16 aux1;
+               } flex;
+               __le32 ts_high;
+       } flex_ts;
+};
+
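Purely illustrative (hypothetical helper, not part of the patch): once the corresponding valid bits have been checked (e.g. IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S for the hash), the comms profile metadata could be propagated to an mbuf roughly like this, assuming rte_mbuf.h is available.

static inline void
iavf_flex_comms_to_mbuf_sketch(volatile struct iavf_32b_rx_flex_desc_comms *desc,
                               struct rte_mbuf *mb)
{
        /* Flex-fields 0/1 carry the 32-bit RSS hash for RxDID 16-21. */
        mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
        mb->ol_flags |= PKT_RX_RSS_HASH;
        /* Flex-fields 2/3 carry the 32-bit flow (FDIR match) ID. */
        mb->hash.fdir.hi = rte_le_to_cpu_32(desc->flow_id);
        mb->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
}
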
+/* Rx Flex Descriptor
+ * RxDID Profile ID 22-23 (swap Hash and FlowID)
+ * Flex-field 0: Flow ID lower 16-bits
+ * Flex-field 1: Flow ID upper 16-bits
+ * Flex-field 2: RSS hash lower 16-bits
+ * Flex-field 3: RSS hash upper 16-bits
+ * Flex-field 4: AUX0
+ * Flex-field 5: AUX1
+ */
+struct iavf_32b_rx_flex_desc_comms_ovs {
+       /* Qword 0 */
+       u8 rxdid;
+       u8 mir_id_umb_cast;
+       __le16 ptype_flexi_flags0;
+       __le16 pkt_len;
+       __le16 hdr_len_sph_flex_flags1;
+
+       /* Qword 1 */
+       __le16 status_error0;
+       __le16 l2tag1;
+       __le32 flow_id;
+
+       /* Qword 2 */
+       __le16 status_error1;
+       u8 flexi_flags2;
+       u8 ts_low;
+       __le16 l2tag2_1st;
+       __le16 l2tag2_2nd;
+
+       /* Qword 3 */
+       __le32 rss_hash;
+       union {
+               struct {
+                       __le16 aux0;
+                       __le16 aux1;
+               } flex;
+               __le32 ts_high;
+       } flex_ts;
+};
+
+/* Receive Flex Descriptor profile IDs: There are a total
+ * of 64 profiles where profile IDs 0/1 are for legacy; and
+ * profiles 2-63 are flex profiles that can be programmed
+ * with specific metadata (profile 7 is reserved for HW)
+ */
+enum iavf_rxdid {
+       IAVF_RXDID_LEGACY_0             = 0,
+       IAVF_RXDID_LEGACY_1             = 1,
+       IAVF_RXDID_FLEX_NIC             = 2,
+       IAVF_RXDID_FLEX_NIC_2           = 6,
+       IAVF_RXDID_HW                   = 7,
+       IAVF_RXDID_COMMS_GENERIC        = 16,
+       IAVF_RXDID_COMMS_AUX_VLAN       = 17,
+       IAVF_RXDID_COMMS_AUX_IPV4       = 18,
+       IAVF_RXDID_COMMS_AUX_IPV6       = 19,
+       IAVF_RXDID_COMMS_AUX_IPV6_FLOW  = 20,
+       IAVF_RXDID_COMMS_AUX_TCP        = 21,
+       IAVF_RXDID_COMMS_OVS_1          = 22,
+       IAVF_RXDID_COMMS_OVS_2          = 23,
+       IAVF_RXDID_LAST                 = 63,
+};
+
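As an illustrative helper (not in the patch), the per-queue rxdid added earlier in this file could be classified with a check like the one below; profiles 0 and 1 keep the legacy descriptor layout, while any other programmed profile implies the flex writeback format.

static inline int
iavf_rxdid_is_flex_sketch(uint8_t rxdid)
{
        /* Only the two legacy profiles use the legacy descriptor layout. */
        return rxdid != IAVF_RXDID_LEGACY_0 && rxdid != IAVF_RXDID_LEGACY_1;
}
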
+enum iavf_rx_flex_desc_status_error_0_bits {
+       /* Note: These are predefined bit offsets */
+       IAVF_RX_FLEX_DESC_STATUS0_DD_S = 0,
+       IAVF_RX_FLEX_DESC_STATUS0_EOF_S,
+       IAVF_RX_FLEX_DESC_STATUS0_HBO_S,
+       IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
+       IAVF_RX_FLEX_DESC_STATUS0_LPBK_S,
+       IAVF_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
+       IAVF_RX_FLEX_DESC_STATUS0_RXE_S,
+       IAVF_RX_FLEX_DESC_STATUS0_CRCP_S,
+       IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
+       IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
+       IAVF_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
+       IAVF_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
+};
+
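Illustration of how these predefined offsets are meant to be used (hypothetical helper): shift to build a mask and test the writeback status word, here to see whether hardware performed L3/L4 checksum validation.

static inline int
iavf_flex_desc_l3l4_checked_sketch(volatile union iavf_rx_flex_desc *rxdp)
{
        uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

        return !!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S));
}
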
+/* for iavf_32b_rx_flex_desc.ptype_flex_flags0 member */
+#define IAVF_RX_FLEX_DESC_PTYPE_M      (0x3FF) /* 10-bits */
+
+/* for iavf_32b_rx_flex_desc.pkt_len member */
+#define IAVF_RX_FLX_DESC_PKT_LEN_M     (0x3FFF) /* 14-bits */
+
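A matching sketch for the two masks above (again a hypothetical helper), applied to the writeback fields they qualify:

static inline void
iavf_flex_desc_ptype_len_sketch(volatile union iavf_rx_flex_desc *rxdp,
                                uint16_t *ptype, uint16_t *pkt_len)
{
        *ptype = rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0) &
                 IAVF_RX_FLEX_DESC_PTYPE_M;
        *pkt_len = rte_le_to_cpu_16(rxdp->wb.pkt_len) &
                   IAVF_RX_FLX_DESC_PKT_LEN_M;
}
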
 int iavf_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
@@ -179,9 +383,15 @@ void iavf_dev_tx_queue_release(void *txq);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
                       uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
+                                struct rte_mbuf **rx_pkts,
+                                uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts(void *rx_queue,
                                 struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
+                                          struct rte_mbuf **rx_pkts,
+                                          uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                       uint16_t nb_pkts);
 uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -198,16 +408,27 @@ int iavf_dev_tx_desc_status(void *tx_queue, uint16_t offset);
 
 uint16_t iavf_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
                           uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+                                    uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec(void *rx_queue,
                                     struct rte_mbuf **rx_pkts,
                                     uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_flex_rxd(void *rx_queue,
+                                              struct rte_mbuf **rx_pkts,
+                                              uint16_t nb_pkts);
 uint16_t iavf_xmit_fixed_burst_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                                  uint16_t nb_pkts);
 uint16_t iavf_recv_pkts_vec_avx2(void *rx_queue, struct rte_mbuf **rx_pkts,
                                 uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_vec_avx2_flex_rxd(void *rx_queue,
+                                         struct rte_mbuf **rx_pkts,
+                                         uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts_vec_avx2(void *rx_queue,
                                           struct rte_mbuf **rx_pkts,
                                           uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_vec_avx2_flex_rxd(void *rx_queue,
+                                                   struct rte_mbuf **rx_pkts,
+                                                   uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
                            uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts_vec_avx2(void *tx_queue, struct rte_mbuf **tx_pkts,
@@ -271,6 +492,35 @@ void iavf_dump_tx_descriptor(const struct iavf_tx_queue *txq,
               tx_desc->cmd_type_offset_bsz);
 }
 
+#define FDIR_PROC_ENABLE_PER_QUEUE(ad, on) do { \
+       int i; \
+       for (i = 0; i < (ad)->eth_dev->data->nb_rx_queues; i++) { \
+               struct iavf_rx_queue *rxq = (ad)->eth_dev->data->rx_queues[i]; \
+               if (!rxq) \
+                       continue; \
+               rxq->fdir_enabled = on; \
+       } \
+       PMD_DRV_LOG(DEBUG, "FDIR processing on RX set to %d", on); \
+} while (0)
+
+/* Enable/disable flow director Rx processing in data path. */
+static inline
+void iavf_fdir_rx_proc_enable(struct iavf_adapter *ad, bool on)
+{
+       if (on) {
+               /* enable flow director processing */
+               FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+               ad->fdir_ref_cnt++;
+       } else {
+               if (ad->fdir_ref_cnt >= 1) {
+                       ad->fdir_ref_cnt--;
+
+                       if (ad->fdir_ref_cnt == 0)
+                               FDIR_PROC_ENABLE_PER_QUEUE(ad, on);
+               }
+       }
+}
+
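A hedged usage sketch (the real call sites live in the FDIR flow create/destroy paths, not shown here): enabling on rule creation and disabling on rule destruction keeps fdir_ref_cnt balanced, so the Rx path only inspects flex descriptor FDIR metadata while at least one rule is installed.

/* Hypothetical call sites for iavf_fdir_rx_proc_enable(). */
static inline void
iavf_fdir_rule_created_sketch(struct iavf_adapter *ad)
{
        iavf_fdir_rx_proc_enable(ad, true);
}

static inline void
iavf_fdir_rule_destroyed_sketch(struct iavf_adapter *ad)
{
        iavf_fdir_rx_proc_enable(ad, false);
}
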
 #ifdef RTE_LIBRTE_IAVF_DEBUG_DUMP_DESC
 #define IAVF_DUMP_RX_DESC(rxq, desc, rx_id) \
        iavf_dump_rx_descriptor(rxq, desc, rx_id)