net/sfc: convert to new Rx offload API
author:    Ivan Malov <ivan.malov@oktetlabs.ru>
           Thu, 18 Jan 2018 09:44:29 +0000 (09:44 +0000)
committer: Ferruh Yigit <ferruh.yigit@intel.com>
           Sun, 21 Jan 2018 14:51:52 +0000 (15:51 +0100)
Ethdev Rx offloads API has changed since:
commit ce17eddefc20 ("ethdev: introduce Rx queue offloads API")
This commit adds support for the new Rx offloads API.

Signed-off-by: Ivan Malov <ivan.malov@oktetlabs.ru>
Signed-off-by: Andrew Rybchenko <arybchenko@solarflare.com>
Reviewed-by: Ferruh Yigit <ferruh.yigit@intel.com>
drivers/net/sfc/sfc_ethdev.c
drivers/net/sfc/sfc_port.c
drivers/net/sfc/sfc_rx.c
drivers/net/sfc/sfc_rx.h
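
A minimal, hedged sketch (not part of the patch) of how an application drives the new API that this commit wires into the sfc PMD: port-wide Rx offloads go into rte_eth_conf.rxmode.offloads, per-queue ones into rte_eth_rxconf.offloads. The port id, queue/descriptor counts and the mempool "mp" are illustrative assumptions; ignore_offload_bitfield is the ethdev field that selects the new bit-field API in this DPDK release.

    #include <string.h>
    #include <rte_ethdev.h>

    static int
    setup_port_rx(uint16_t port_id, struct rte_mempool *mp)
    {
            struct rte_eth_dev_info dev_info;
            struct rte_eth_conf port_conf;
            struct rte_eth_rxconf rx_conf;
            int rc;

            rte_eth_dev_info_get(port_id, &dev_info);

            memset(&port_conf, 0, sizeof(port_conf));
            /* Use the new offloads bit-field instead of the legacy rxmode flags */
            port_conf.rxmode.ignore_offload_bitfield = 1;
            /* Port-level (device) Rx offloads requested for all queues */
            port_conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
                                        DEV_RX_OFFLOAD_CRC_STRIP;

            rc = rte_eth_dev_configure(port_id, 1 /* nb_rxq */, 1 /* nb_txq */,
                                       &port_conf);
            if (rc != 0)
                    return rc;

            /*
             * Queue-level offloads: repeat the port-level ones (the driver
             * checks for this, see sfc_rx_queue_offloads_mismatch() below)
             * and add per-queue capabilities such as Rx scatter if offered.
             */
            rx_conf = dev_info.default_rxconf;
            rx_conf.offloads = port_conf.rxmode.offloads;
            if (dev_info.rx_queue_offload_capa & DEV_RX_OFFLOAD_SCATTER)
                    rx_conf.offloads |= DEV_RX_OFFLOAD_SCATTER;

            return rte_eth_rx_queue_setup(port_id, 0, 512 /* nb_rx_desc */,
                                          rte_eth_dev_socket_id(port_id),
                                          &rx_conf, mp);
    }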

diff --git a/drivers/net/sfc/sfc_ethdev.c b/drivers/net/sfc/sfc_ethdev.c
index 851b38b..0244a0f 100644
@@ -104,7 +104,15 @@ sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        /* By default packets are dropped if no descriptors are available */
        dev_info->default_rxconf.rx_drop_en = 1;
 
-       dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa);
+       dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
+
+       /*
+        * rx_offload_capa includes both device and queue offloads since
+        * the latter may be requested on a per device basis which makes
+        * sense when some offloads are needed to be set on all queues.
+        */
+       dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
+                                   dev_info->rx_queue_offload_capa;
 
        dev_info->tx_offload_capa =
                DEV_TX_OFFLOAD_IPV4_CKSUM |
@@ -882,7 +890,13 @@ sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
         * The driver does not use it, but other PMDs update jumbo_frame
         * flag and max_rx_pkt_len when MTU is set.
         */
-       dev->data->dev_conf.rxmode.jumbo_frame = (mtu > ETHER_MAX_LEN);
+       if (mtu > ETHER_MAX_LEN) {
+               struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
+
+               rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+               rxmode->jumbo_frame = 1;
+       }
+
        dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
 
        sfc_adapter_unlock(sa);
@@ -1045,8 +1059,13 @@ sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
        qinfo->conf.rx_free_thresh = rxq->refill_threshold;
        qinfo->conf.rx_drop_en = 1;
        qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
-       qinfo->scattered_rx =
-               ((rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) != 0);
+       qinfo->conf.offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
+                              DEV_RX_OFFLOAD_UDP_CKSUM |
+                              DEV_RX_OFFLOAD_TCP_CKSUM;
+       if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
+               qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
+               qinfo->scattered_rx = 1;
+       }
        qinfo->nb_desc = rxq_info->entries;
 
        sfc_adapter_unlock(sa);
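
The hunk above advertises rx_queue_offload_capa separately and folds it into rx_offload_capa. A small hedged sketch (port_id is an assumption) of how an application can read back that split and derive the offloads that can only be enabled port-wide:

    #include <inttypes.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    show_rx_capa_split(uint16_t port_id)
    {
            struct rte_eth_dev_info dev_info;
            uint64_t port_only;

            rte_eth_dev_info_get(port_id, &dev_info);
            /* rx_offload_capa is a superset of rx_queue_offload_capa here */
            port_only = dev_info.rx_offload_capa & ~dev_info.rx_queue_offload_capa;

            printf("port %u: queue-level Rx capa 0x%" PRIx64
                   ", port-only Rx capa 0x%" PRIx64 "\n",
                   port_id, dev_info.rx_queue_offload_capa, port_only);
    }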
diff --git a/drivers/net/sfc/sfc_port.c b/drivers/net/sfc/sfc_port.c
index a48388d..c423f52 100644
@@ -299,11 +299,12 @@ sfc_port_configure(struct sfc_adapter *sa)
 {
        const struct rte_eth_dev_data *dev_data = sa->eth_dev->data;
        struct sfc_port *port = &sa->port;
+       const struct rte_eth_rxmode *rxmode = &dev_data->dev_conf.rxmode;
 
        sfc_log_init(sa, "entry");
 
-       if (dev_data->dev_conf.rxmode.jumbo_frame)
-               port->pdu = dev_data->dev_conf.rxmode.max_rx_pkt_len;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+               port->pdu = rxmode->max_rx_pkt_len;
        else
                port->pdu = EFX_MAC_PDU(dev_data->mtu);
 
diff --git a/drivers/net/sfc/sfc_rx.c b/drivers/net/sfc/sfc_rx.c
index d35f4f7..abc53fb 100644
@@ -768,6 +768,8 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
        const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
        uint64_t caps = 0;
 
+       caps |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+       caps |= DEV_RX_OFFLOAD_CRC_STRIP;
        caps |= DEV_RX_OFFLOAD_IPV4_CKSUM;
        caps |= DEV_RX_OFFLOAD_UDP_CKSUM;
        caps |= DEV_RX_OFFLOAD_TCP_CKSUM;
@@ -779,10 +781,62 @@ sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
        return caps;
 }
 
+uint64_t
+sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
+{
+       uint64_t caps = 0;
+
+       if (sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)
+               caps |= DEV_RX_OFFLOAD_SCATTER;
+
+       return caps;
+}
+
+static void
+sfc_rx_log_offloads(struct sfc_adapter *sa, const char *offload_group,
+                   const char *verdict, uint64_t offloads)
+{
+       unsigned long long bit;
+
+       while ((bit = __builtin_ffsll(offloads)) != 0) {
+               uint64_t flag = (1ULL << --bit);
+
+               sfc_err(sa, "Rx %s offload %s %s", offload_group,
+                       rte_eth_dev_rx_offload_name(flag), verdict);
+
+               offloads &= ~flag;
+       }
+}
+
+static boolean_t
+sfc_rx_queue_offloads_mismatch(struct sfc_adapter *sa, uint64_t requested)
+{
+       uint64_t mandatory = sa->eth_dev->data->dev_conf.rxmode.offloads;
+       uint64_t supported = sfc_rx_get_dev_offload_caps(sa) |
+                            sfc_rx_get_queue_offload_caps(sa);
+       uint64_t rejected = requested & ~supported;
+       uint64_t missing = (requested & mandatory) ^ mandatory;
+       boolean_t mismatch = B_FALSE;
+
+       if (rejected) {
+               sfc_rx_log_offloads(sa, "queue", "is unsupported", rejected);
+               mismatch = B_TRUE;
+       }
+
+       if (missing) {
+               sfc_rx_log_offloads(sa, "queue", "must be set", missing);
+               mismatch = B_TRUE;
+       }
+
+       return mismatch;
+}
+
 static int
 sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
                   const struct rte_eth_rxconf *rx_conf)
 {
+       uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+                                     sfc_rx_get_queue_offload_caps(sa);
        int rc = 0;
 
        if (rx_conf->rx_thresh.pthresh != 0 ||
@@ -804,6 +858,17 @@ sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
                rc = EINVAL;
        }
 
+       if ((rx_conf->offloads & DEV_RX_OFFLOAD_CHECKSUM) !=
+           DEV_RX_OFFLOAD_CHECKSUM)
+               sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
+
+       if ((offloads_supported & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
+           (~rx_conf->offloads & DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM))
+               sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
+
+       if (sfc_rx_queue_offloads_mismatch(sa, rx_conf->offloads))
+               rc = EINVAL;
+
        return rc;
 }
 
@@ -946,7 +1011,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
        }
 
        if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
-           !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
+           (~rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)) {
                sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
                        "object size is too small", sw_index);
                sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
@@ -964,7 +1029,7 @@ sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
        rxq_info->entries = rxq_entries;
        rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
        rxq_info->type_flags =
-               sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
+               (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) ?
                EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
 
        if ((encp->enc_tunnel_encapsulations_supported != 0) &&
@@ -1227,6 +1292,9 @@ sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
 static int
 sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
 {
+       uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
+                                     sfc_rx_get_queue_offload_caps(sa);
+       uint64_t offloads_rejected = rxmode->offloads & ~offloads_supported;
        int rc = 0;
 
        switch (rxmode->mq_mode) {
@@ -1247,45 +1315,18 @@ sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
                rc = EINVAL;
        }
 
-       if (rxmode->header_split) {
-               sfc_err(sa, "Header split on Rx not supported");
-               rc = EINVAL;
-       }
-
-       if (rxmode->hw_vlan_filter) {
-               sfc_err(sa, "HW VLAN filtering not supported");
-               rc = EINVAL;
-       }
-
-       if (rxmode->hw_vlan_strip) {
-               sfc_err(sa, "HW VLAN stripping not supported");
+       if (offloads_rejected) {
+               sfc_rx_log_offloads(sa, "device", "is unsupported",
+                                   offloads_rejected);
                rc = EINVAL;
        }
 
-       if (rxmode->hw_vlan_extend) {
-               sfc_err(sa,
-                       "Q-in-Q HW VLAN stripping not supported");
-               rc = EINVAL;
-       }
-
-       if (!rxmode->hw_strip_crc) {
-               sfc_warn(sa,
-                        "FCS stripping control not supported - always stripped");
+       if (~rxmode->offloads & DEV_RX_OFFLOAD_CRC_STRIP) {
+               sfc_warn(sa, "FCS stripping cannot be disabled - always on");
+               rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
                rxmode->hw_strip_crc = 1;
        }
 
-       if (rxmode->enable_scatter &&
-           (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
-               sfc_err(sa, "Rx scatter not supported by %s datapath",
-                       sa->dp_rx->dp.name);
-               rc = EINVAL;
-       }
-
-       if (rxmode->enable_lro) {
-               sfc_err(sa, "LRO not supported");
-               rc = EINVAL;
-       }
-
        return rc;
 }
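
A standalone, hedged approximation (not the driver function itself) of the reporting technique used by sfc_rx_log_offloads() above: walk a mask bit by bit with __builtin_ffsll() and print each offload by name via rte_eth_dev_rx_offload_name(), so logs show names instead of raw masks:

    #include <stdint.h>
    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    print_rx_offload_bits(const char *verdict, uint64_t offloads)
    {
            while (offloads != 0) {
                    int bit = __builtin_ffsll(offloads) - 1;
                    uint64_t flag = UINT64_C(1) << bit;

                    printf("Rx offload %s %s\n",
                           rte_eth_dev_rx_offload_name(flag), verdict);
                    offloads &= ~flag;
            }
    }

    /* Example: print_rx_offload_bits("is unsupported", rejected_mask); */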
 
diff --git a/drivers/net/sfc/sfc_rx.h b/drivers/net/sfc/sfc_rx.h
index cc9245f..8c0fa71 100644
@@ -143,6 +143,7 @@ int sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index);
 void sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index);
 
 uint64_t sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa);
+uint64_t sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa);
 
 void sfc_rx_qflush_done(struct sfc_rxq *rxq);
 void sfc_rx_qflush_failed(struct sfc_rxq *rxq);