diff --git a/drivers/net/ice/ice_rxtx.c b/drivers/net/ice/ice_rxtx.c
index d052bd0..4136d04 100644
--- a/drivers/net/ice/ice_rxtx.c
+++ b/drivers/net/ice/ice_rxtx.c
@@ -2,7 +2,7 @@
  * Copyright(c) 2018 Intel Corporation
  */
 
-#include <rte_ethdev_driver.h>
+#include <ethdev_driver.h>
 #include <rte_net.h>
 #include <rte_vect.h>
 
@@ -26,6 +26,31 @@ uint64_t rte_net_ice_dynflag_proto_xtr_ipv6_flow_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_tcp_mask;
 uint64_t rte_net_ice_dynflag_proto_xtr_ip_offset_mask;
 
+int
+ice_get_monitor_addr(void *rx_queue, struct rte_power_monitor_cond *pmc)
+{
+       volatile union ice_rx_flex_desc *rxdp;
+       struct ice_rx_queue *rxq = rx_queue;
+       uint16_t desc;
+
+       desc = rxq->rx_tail;
+       rxdp = &rxq->rx_ring[desc];
+       /* watch for changes in status bit */
+       pmc->addr = &rxdp->wb.status_error0;
+
+       /*
+        * we expect the DD bit to be set to 1 if this descriptor was already
+        * written to.
+        */
+       pmc->val = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
+       pmc->mask = rte_cpu_to_le_16(1 << ICE_RX_FLEX_DESC_STATUS0_DD_S);
+
+       /* register is 16-bit */
+       pmc->size = sizeof(uint16_t);
+
+       return 0;
+}
+
 static inline uint8_t
 ice_proto_xtr_type_to_rxdid(uint8_t xtr_type)
 {
@@ -246,23 +272,23 @@ ice_program_hw_rx_queue(struct ice_rx_queue *rxq)
                                   dev->data->dev_conf.rxmode.max_rx_pkt_len);
 
        if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
-               if (rxq->max_pkt_len <= RTE_ETHER_MAX_LEN ||
+               if (rxq->max_pkt_len <= ICE_ETH_MAX_LEN ||
                    rxq->max_pkt_len > ICE_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must "
                                    "be larger than %u and smaller than %u,"
                                    "as jumbo frame is enabled",
-                                   (uint32_t)RTE_ETHER_MAX_LEN,
+                                   (uint32_t)ICE_ETH_MAX_LEN,
                                    (uint32_t)ICE_FRAME_SIZE_MAX);
                        return -EINVAL;
                }
        } else {
                if (rxq->max_pkt_len < RTE_ETHER_MIN_LEN ||
-                   rxq->max_pkt_len > RTE_ETHER_MAX_LEN) {
+                   rxq->max_pkt_len > ICE_ETH_MAX_LEN) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
                                    "larger than %u and smaller than %u, "
                                    "as jumbo frame is disabled",
                                    (uint32_t)RTE_ETHER_MIN_LEN,
-                                   (uint32_t)RTE_ETHER_MAX_LEN);
+                                   (uint32_t)ICE_ETH_MAX_LEN);
                        return -EINVAL;
                }
        }
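
For reference on the bound itself: ICE_ETH_MAX_LEN is, to our understanding,
RTE_ETHER_MTU plus the ice L2 overhead (Ethernet header, CRC and two VLAN
tags), i.e. 1526 bytes, versus the generic RTE_ETHER_MAX_LEN of 1518. A
1522-byte single-VLAN frame is therefore now accepted without the jumbo
offload. A self-contained sketch of the same range check, with the constants
spelled out as assumptions:

#include <stdint.h>
#include <stdio.h>

/* Assumed values mirroring rte_ether.h/ice_ethdev.h; not from this patch. */
#define ETHER_MIN_LEN	64U
#define ETHER_MTU	1500U
#define ETH_OVERHEAD	(14U + 4U + 2U * 4U) /* L2 hdr + CRC + 2 VLAN tags */
#define ETH_MAX_LEN	(ETHER_MTU + ETH_OVERHEAD) /* 1526 */

/* Mirrors the non-jumbo branch above: accept [ETHER_MIN_LEN, ETH_MAX_LEN]. */
static int
check_non_jumbo(uint32_t max_pkt_len)
{
	if (max_pkt_len < ETHER_MIN_LEN || max_pkt_len > ETH_MAX_LEN)
		return -1; /* -EINVAL in the driver */
	return 0;
}

int
main(void)
{
	/* 1522 (MTU + VLAN + CRC) was rejected against the old 1518 cap. */
	printf("1522 -> %s\n", check_non_jumbo(1522) ? "reject" : "accept");
	printf("1527 -> %s\n", check_non_jumbo(1527) ? "reject" : "accept");
	return 0;
}
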
@@ -701,7 +727,7 @@ ice_fdir_program_hw_rx_queue(struct ice_rx_queue *rxq)
        rx_ctx.hbuf = rxq->rx_hdr_len >> ICE_RLAN_CTX_HBUF_S;
        rx_ctx.dtype = 0; /* No Header Split mode */
        rx_ctx.dsize = 1; /* 32B descriptors */
-       rx_ctx.rxmax = RTE_ETHER_MAX_LEN;
+       rx_ctx.rxmax = ICE_ETH_MAX_LEN;
        /* TPH: Transaction Layer Packet (TLP) processing hints */
        rx_ctx.tphrdesc_ena = 1;
        rx_ctx.tphwdesc_ena = 1;
@@ -3880,6 +3906,50 @@ ice_get_default_pkt_type(uint16_t ptype)
                        RTE_PTYPE_TUNNEL_GTPU |
                        RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN |
                        RTE_PTYPE_INNER_L4_ICMP,
+
+               /* IPv4 --> UDP ECPRI */
+               [372] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [373] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [374] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [375] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [376] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [377] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [378] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [379] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [380] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [381] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+
+               /* IPV6 --> UDP ECPRI */
+               [382] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [383] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [384] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [385] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [386] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [387] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [388] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [389] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [390] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
+               [391] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN |
+                       RTE_PTYPE_L4_UDP,
                /* All others reserved */
        };
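
With these entries in place, eCPRI-over-UDP frames surface to applications
as ordinary L2/L3/L4 flags in mbuf->packet_type; note the table classifies
only up to the UDP layer, and the eCPRI payload itself has no RTE_PTYPE_*
encoding here. A small, hypothetical consumer-side check:

#include <stdbool.h>
#include <rte_mbuf.h>
#include <rte_mbuf_ptype.h>

/* Hypothetical helper: true when the driver classified the packet as UDP
 * over IPv4/IPv6, which is all the new entries above report.
 */
static bool
is_udp_over_ip(const struct rte_mbuf *m)
{
	const uint32_t pt = m->packet_type;

	if ((pt & RTE_PTYPE_L4_MASK) != RTE_PTYPE_L4_UDP)
		return false;
	return RTE_ETH_IS_IPV4_HDR(pt) || RTE_ETH_IS_IPV6_HDR(pt);
}

Distinguishing eCPRI itself would still require inspecting the UDP
destination port or, on this device, the protocol-extraction metadata
exposed through the rte_net_ice dynamic flags declared at the top of this
file.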