net/i40e: convert to new Rx offloads API
author    Yanglong Wu <yanglong.wu@intel.com>
          Fri, 30 Mar 2018 08:22:11 +0000 (16:22 +0800)
committer Ferruh Yigit <ferruh.yigit@intel.com>
          Fri, 13 Apr 2018 22:40:21 +0000 (00:40 +0200)
Ethdev Rx offloads API has changed since:
commit cba7f53b717d ("ethdev: introduce Rx queue offloads API")
This commit supports the new Rx offloads API.
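
For reference, the conversion replaces the old rte_eth_rxmode bit-fields
(hw_vlan_strip, hw_vlan_filter, hw_vlan_extend, hw_strip_crc, jumbo_frame,
enable_scatter) with DEV_RX_OFFLOAD_* flags in rxmode.offloads, plus optional
per-queue flags in rte_eth_rxconf.offloads. A minimal application-side sketch
of the new usage (the port/queue numbers, ring size and chosen offload set
below are illustrative only, not taken from this patch):

#include <string.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>

/* Illustrative only: request VLAN stripping and CRC stripping through the
 * new offloads fields instead of the removed rxmode bit-fields. */
static int
setup_port_rx(uint16_t port_id, struct rte_mempool *mb_pool)
{
	struct rte_eth_conf conf;
	struct rte_eth_rxconf rxconf;
	struct rte_eth_dev_info info;
	int ret;

	memset(&conf, 0, sizeof(conf));
	rte_eth_dev_info_get(port_id, &info);

	/* Port-level offloads (was rxmode.hw_vlan_strip / hw_strip_crc = 1). */
	conf.rxmode.offloads = DEV_RX_OFFLOAD_VLAN_STRIP |
			       DEV_RX_OFFLOAD_CRC_STRIP;
	/* Keep only what the port reports as supported. */
	conf.rxmode.offloads &= info.rx_offload_capa;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0)
		return ret;

	/* Queue-level offloads must stay consistent with the port-level
	 * configuration; starting from the port set is the simplest choice. */
	rxconf = info.default_rxconf;
	rxconf.offloads = conf.rxmode.offloads;

	return rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
				      &rxconf, mb_pool);
}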

Signed-off-by: Yanglong Wu <yanglong.wu@intel.com>
Acked-by: Qi Zhang <qi.z.zhang@intel.com>
drivers/net/i40e/i40e_ethdev.c
drivers/net/i40e/i40e_ethdev_vf.c
drivers/net/i40e/i40e_flow.c
drivers/net/i40e/i40e_rxtx.c
drivers/net/i40e/i40e_rxtx.h

drivers/net/i40e/i40e_ethdev.c
index e00f402..b1d269b 100644
@@ -3219,6 +3219,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_rx_pktlen = I40E_FRAME_SIZE_MAX;
        dev_info->max_mac_addrs = vsi->max_macaddrs;
        dev_info->max_vfs = pci_dev->max_vfs;
+       dev_info->rx_queue_offload_capa = 0;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -3226,7 +3227,10 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_CRC_STRIP;
+               DEV_RX_OFFLOAD_CRC_STRIP |
+               DEV_RX_OFFLOAD_VLAN_EXTEND |
+               DEV_RX_OFFLOAD_VLAN_FILTER;
+
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -3253,6 +3257,7 @@ i40e_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
+               .offloads = 0,
        };
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -3372,7 +3377,8 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev,
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
-       int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+       int qinq = dev->data->dev_conf.rxmode.offloads &
+                  DEV_RX_OFFLOAD_VLAN_EXTEND;
        int ret = 0;
 
        if ((vlan_type != ETH_VLAN_TYPE_INNER &&
@@ -3420,9 +3426,11 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_vsi *vsi = pf->main_vsi;
+       struct rte_eth_rxmode *rxmode;
 
+       rxmode = &dev->data->dev_conf.rxmode;
        if (mask & ETH_VLAN_FILTER_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                        i40e_vsi_config_vlan_filter(vsi, TRUE);
                else
                        i40e_vsi_config_vlan_filter(vsi, FALSE);
@@ -3430,14 +3438,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 
        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
-               if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        i40e_vsi_config_vlan_stripping(vsi, TRUE);
                else
                        i40e_vsi_config_vlan_stripping(vsi, FALSE);
        }
 
        if (mask & ETH_VLAN_EXTEND_MASK) {
-               if (dev->data->dev_conf.rxmode.hw_vlan_extend) {
+               if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_EXTEND) {
                        i40e_vsi_config_double_vlan(vsi, TRUE);
                        /* Set global registers with default ethertype. */
                        i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER,
@@ -3684,6 +3692,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
        struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
        struct i40e_mac_filter_info mac_filter;
        struct i40e_vsi *vsi;
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        int ret;
 
        /* If VMDQ not enabled or configured, return */
@@ -3702,7 +3711,7 @@ i40e_macaddr_add(struct rte_eth_dev *dev,
        }
 
        rte_memcpy(&mac_filter.mac_addr, mac_addr, ETHER_ADDR_LEN);
-       if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+       if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
                mac_filter.filter_type = RTE_MACVLAN_PERFECT_MATCH;
        else
                mac_filter.filter_type = RTE_MAC_PERFECT_MATCH;
@@ -11354,9 +11363,11 @@ i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
 
        if (frame_size > ETHER_MAX_LEN)
-               dev_data->dev_conf.rxmode.jumbo_frame = 1;
+               dev_data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev_data->dev_conf.rxmode.jumbo_frame = 0;
+               dev_data->dev_conf.rxmode.offloads &=
+                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
drivers/net/i40e/i40e_ethdev_vf.c
index 750d849..03b7b5b 100644
@@ -1536,7 +1536,7 @@ i40evf_dev_configure(struct rte_eth_dev *dev)
        /* For non-DPDK PF drivers, VF has no ability to disable HW
         * CRC strip, and is implicitly enabled by the PF.
         */
-       if (!conf->rxmode.hw_strip_crc) {
+       if (!(conf->rxmode.offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
                vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
                if ((vf->version_major == VIRTCHNL_VERSION_MAJOR) &&
                    (vf->version_minor <= VIRTCHNL_VERSION_MINOR)) {
@@ -1570,7 +1570,7 @@ i40evf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        /* Vlan stripping setting */
        if (mask & ETH_VLAN_STRIP_MASK) {
                /* Enable or disable VLAN stripping */
-               if (dev_conf->rxmode.hw_vlan_strip)
+               if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        i40evf_enable_vlan_strip(dev);
                else
                        i40evf_disable_vlan_strip(dev);
@@ -1727,7 +1727,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
        /**
         * Check if the jumbo frame and maximum packet length are set correctly
         */
-       if (dev_data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
                    rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must be "
@@ -1747,7 +1747,7 @@ i40evf_rxq_init(struct rte_eth_dev *dev, struct i40e_rx_queue *rxq)
                }
        }
 
-       if (dev_data->dev_conf.rxmode.enable_scatter ||
+       if ((dev_data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size) {
                dev_data->scattered_rx = 1;
        }
@@ -2192,6 +2192,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->reta_size = ETH_RSS_RETA_SIZE_64;
        dev_info->flow_type_rss_offloads = vf->adapter->flow_types_mask;
        dev_info->max_mac_addrs = I40E_NUM_MACADDR_MAX;
+       dev_info->rx_queue_offload_capa = 0;
        dev_info->rx_offload_capa =
                DEV_RX_OFFLOAD_VLAN_STRIP |
                DEV_RX_OFFLOAD_QINQ_STRIP |
@@ -2199,7 +2200,9 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                DEV_RX_OFFLOAD_UDP_CKSUM |
                DEV_RX_OFFLOAD_TCP_CKSUM |
                DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_CRC_STRIP;
+               DEV_RX_OFFLOAD_CRC_STRIP |
+               DEV_RX_OFFLOAD_SCATTER;
+
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_VLAN_INSERT |
                DEV_TX_OFFLOAD_QINQ_INSERT |
@@ -2222,6 +2225,7 @@ i40evf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                },
                .rx_free_thresh = I40E_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
+               .offloads = 0,
        };
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -2652,10 +2656,11 @@ i40evf_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        }
 
        if (frame_size > ETHER_MAX_LEN)
-               dev_data->dev_conf.rxmode.jumbo_frame = 1;
+               dev_data->dev_conf.rxmode.offloads |=
+                       DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev_data->dev_conf.rxmode.jumbo_frame = 0;
-
+               dev_data->dev_conf.rxmode.offloads &=
+                       ~DEV_RX_OFFLOAD_JUMBO_FRAME;
        dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size;
 
        return ret;
drivers/net/i40e/i40e_flow.c
index 69d1ba5..5ba87bf 100644
@@ -1939,7 +1939,8 @@ static uint16_t
 i40e_get_outer_vlan(struct rte_eth_dev *dev)
 {
        struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
+       int qinq = dev->data->dev_conf.rxmode.offloads &
+               DEV_RX_OFFLOAD_VLAN_EXTEND;
        uint64_t reg_r = 0;
        uint16_t reg_id;
        uint16_t tpid;
drivers/net/i40e/i40e_rxtx.c
index 56a854c..09d3b38 100644
@@ -1692,6 +1692,20 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
        return NULL;
 }
 
+static int
+i40e_check_rx_queue_offloads(struct rte_eth_dev *dev, uint64_t requested)
+{
+       struct rte_eth_dev_info dev_info;
+       uint64_t mandatory = dev->data->dev_conf.rxmode.offloads;
+       uint64_t supported; /* All per port offloads */
+
+       dev->dev_ops->dev_infos_get(dev, &dev_info);
+       supported = dev_info.rx_offload_capa ^ dev_info.rx_queue_offload_capa;
+       if ((requested & dev_info.rx_offload_capa) != requested)
+               return 0; /* requested range check */
+       return !((mandatory ^ requested) & supported);
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
                        uint16_t queue_idx,
@@ -1712,6 +1726,18 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        uint16_t len, i;
        uint16_t reg_idx, base, bsf, tc_mapping;
        int q_offset, use_def_burst_func = 1;
+       struct rte_eth_dev_info dev_info;
+
+       if (!i40e_check_rx_queue_offloads(dev, rx_conf->offloads)) {
+               dev->dev_ops->dev_infos_get(dev, &dev_info);
+               PMD_INIT_LOG(ERR, "%p: Rx queue offloads 0x%" PRIx64
+                       " don't match port  offloads 0x%" PRIx64
+                       " or supported offloads 0x%" PRIx64,
+                       (void *)dev, rx_conf->offloads,
+                       dev->data->dev_conf.rxmode.offloads,
+                       dev_info.rx_offload_capa);
+               return -ENOTSUP;
+       }
 
        if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
                vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,8 +1786,8 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->queue_id = queue_idx;
        rxq->reg_idx = reg_idx;
        rxq->port_id = dev->data->port_id;
-       rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-                                                       0 : ETHER_CRC_LEN);
+       rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_CRC_STRIP) ? 0 : ETHER_CRC_LEN);
        rxq->drop_en = rx_conf->rx_drop_en;
        rxq->vsi = vsi;
        rxq->rx_deferred_start = rx_conf->rx_deferred_start;
@@ -2469,7 +2495,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq)
 
        len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len;
        rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len);
-       if (data->dev_conf.rxmode.jumbo_frame == 1) {
+       if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
                if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
                        rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
                        PMD_DRV_LOG(ERR, "maximum packet length must "
@@ -2747,6 +2773,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
        qinfo->conf.rx_free_thresh = rxq->rx_free_thresh;
        qinfo->conf.rx_drop_en = rxq->drop_en;
        qinfo->conf.rx_deferred_start = rxq->rx_deferred_start;
+       qinfo->conf.offloads = rxq->offloads;
 }
 
 void
drivers/net/i40e/i40e_rxtx.h
index 34cd792..cb5f8c7 100644
@@ -107,6 +107,7 @@ struct i40e_rx_queue {
        bool rx_deferred_start; /**< don't start this queue in dev start */
        uint16_t rx_using_sse; /**<flag indicate the usage of vPMD for rx */
        uint8_t dcb_tc;         /**< Traffic class of rx queue */
+       uint64_t offloads; /**< Rx offload flags of DEV_RX_OFFLOAD_* */
 };
 
 struct i40e_tx_entry {
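
A note on the per-queue validation added in i40e_rxtx.c above:
rx_queue_offload_capa is reported as a subset of rx_offload_capa, so XOR-ing
the two yields the offloads that can only be toggled port-wide, and queue
setup is rejected whenever the requested offloads disagree with the port
configuration on one of those bits (for i40e, rx_queue_offload_capa is 0, so
every offload is port-level). A small standalone illustration of the same
arithmetic, with made-up bit values rather than real DEV_RX_OFFLOAD_* flags:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of i40e_check_rx_queue_offloads(): 'port_only' holds the
 * offloads that can only be toggled per port, and the check fails whenever a
 * queue request disagrees with the port configuration on one of those bits. */
int
main(void)
{
	uint64_t port_capa     = 0x3f; /* dev_info.rx_offload_capa            */
	uint64_t queue_capa    = 0x03; /* dev_info.rx_queue_offload_capa      */
	uint64_t port_conf     = 0x24; /* rxmode.offloads chosen at configure */
	uint64_t queue_request = 0x25; /* rx_conf->offloads at queue setup    */

	uint64_t port_only = port_capa ^ queue_capa;   /* 0x3c */
	int in_range   = (queue_request & port_capa) == queue_request;
	int consistent = !((port_conf ^ queue_request) & port_only);

	/* Prints "in range: 1, consistent: 1": the request differs from the
	 * port configuration only in bit 0, which is per-queue tunable. */
	printf("in range: %d, consistent: %d\n", in_range, consistent);
	return 0;
}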