ethdev: replace bus specific struct with generic dev
[dpdk.git] / drivers / net / enic / enic_ethdev.c
index cbab702..801f470 100644 (file)
@@ -39,6 +39,19 @@ static const struct rte_pci_id pci_id_enic_map[] = {
        {.vendor_id = 0, /* sentinel */},
 };
 
+#define ENIC_TX_OFFLOAD_CAPA (                 \
+               DEV_TX_OFFLOAD_VLAN_INSERT |    \
+               DEV_TX_OFFLOAD_IPV4_CKSUM  |    \
+               DEV_TX_OFFLOAD_UDP_CKSUM   |    \
+               DEV_TX_OFFLOAD_TCP_CKSUM   |    \
+               DEV_TX_OFFLOAD_TCP_TSO)
+
+#define ENIC_RX_OFFLOAD_CAPA (                 \
+               DEV_RX_OFFLOAD_VLAN_STRIP |     \
+               DEV_RX_OFFLOAD_IPV4_CKSUM |     \
+               DEV_RX_OFFLOAD_UDP_CKSUM  |     \
+               DEV_RX_OFFLOAD_TCP_CKSUM)
+
 RTE_INIT(enicpmd_init_log);
 static void
 enicpmd_init_log(void)
@@ -318,40 +331,29 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        return enicpmd_dev_setup_intr(enic);
 }
 
-static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
-       uint16_t vlan_id, int on)
-{
-       struct enic *enic = pmd_priv(eth_dev);
-       int err;
-
-       ENICPMD_FUNC_TRACE();
-       if (on)
-               err = enic_add_vlan(enic, vlan_id);
-       else
-               err = enic_del_vlan(enic, vlan_id);
-       return err;
-}
-
 static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       uint64_t offloads;
 
        ENICPMD_FUNC_TRACE();
 
+       offloads = eth_dev->data->dev_conf.rxmode.offloads;
        if (mask & ETH_VLAN_STRIP_MASK) {
-               if (eth_dev->data->dev_conf.rxmode.offloads &
-                   DEV_RX_OFFLOAD_VLAN_STRIP)
+               if (offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
                        enic->ig_vlan_strip_en = 1;
                else
                        enic->ig_vlan_strip_en = 0;
        }
 
-       if (mask & ETH_VLAN_FILTER_MASK) {
+       if ((mask & ETH_VLAN_FILTER_MASK) &&
+           (offloads & DEV_RX_OFFLOAD_VLAN_FILTER)) {
                dev_warning(enic,
                        "Configuration of VLAN filter is not supported\n");
        }
 
-       if (mask & ETH_VLAN_EXTEND_MASK) {
+       if ((mask & ETH_VLAN_EXTEND_MASK) &&
+           (offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)) {
                dev_warning(enic,
                        "Configuration of extended VLAN is not supported\n");
        }
@@ -362,6 +364,7 @@ static int enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 {
        int ret;
+       int mask;
        struct enic *enic = pmd_priv(eth_dev);
 
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
@@ -376,7 +379,11 @@ static int enicpmd_dev_configure(struct rte_eth_dev *eth_dev)
 
        enic->hw_ip_checksum = !!(eth_dev->data->dev_conf.rxmode.offloads &
                                  DEV_RX_OFFLOAD_CHECKSUM);
-       ret = enicpmd_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK);
+       /* Apply all VLAN offload masks so current settings take effect */
+       mask = ETH_VLAN_STRIP_MASK |
+               ETH_VLAN_FILTER_MASK |
+               ETH_VLAN_EXTEND_MASK;
+       ret = enicpmd_vlan_offload_set(eth_dev, mask);
        if (ret) {
                dev_err(enic, "Failed to configure VLAN offloads\n");
                return ret;
@@ -416,10 +423,9 @@ static void enicpmd_dev_stop(struct rte_eth_dev *eth_dev)
 
        ENICPMD_FUNC_TRACE();
        enic_disable(enic);
+
        memset(&link, 0, sizeof(link));
-       rte_atomic64_cmpset((uint64_t *)&eth_dev->data->dev_link,
-               *(uint64_t *)&eth_dev->data->dev_link,
-               *(uint64_t *)&link);
+       rte_eth_linkstatus_set(eth_dev, &link);
 }
 
 /*
@@ -465,24 +471,21 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
        struct enic *enic = pmd_priv(eth_dev);
 
        ENICPMD_FUNC_TRACE();
-       device_info->pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        /* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
        device_info->max_rx_queues = enic->conf_rq_count / 2;
        device_info->max_tx_queues = enic->conf_wq_count;
        device_info->min_rx_bufsize = ENIC_MIN_MTU;
-       device_info->max_rx_pktlen = enic->max_mtu + ETHER_HDR_LEN + 4;
+       /* "Max" mtu is not a typo. HW receives packet sizes up to the
+        * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
+        * a hint to the driver to size receive buffers accordingly so that
+        * larger-than-vnic-mtu packets get truncated. For DPDK, we let
+        * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
+        * ignoring vNIC mtu.
+        */
+       device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
        device_info->max_mac_addrs = ENIC_MAX_MAC_ADDR;
-       device_info->rx_offload_capa =
-               DEV_RX_OFFLOAD_VLAN_STRIP |
-               DEV_RX_OFFLOAD_IPV4_CKSUM |
-               DEV_RX_OFFLOAD_UDP_CKSUM  |
-               DEV_RX_OFFLOAD_TCP_CKSUM;
-       device_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_VLAN_INSERT |
-               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-               DEV_TX_OFFLOAD_UDP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_TSO;
+       device_info->rx_offload_capa = ENIC_RX_OFFLOAD_CAPA;
+       device_info->tx_offload_capa = ENIC_TX_OFFLOAD_CAPA;
        device_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
        };
@@ -685,6 +688,74 @@ static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
        return 0;
 }
 
+static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
+                                    uint16_t rx_queue_id,
+                                    struct rte_eth_rxq_info *qinfo)
+{
+       struct enic *enic = pmd_priv(dev);
+       struct vnic_rq *rq_sop;
+       struct vnic_rq *rq_data;
+       struct rte_eth_rxconf *conf;
+       uint16_t sop_queue_idx;
+       uint16_t data_queue_idx;
+
+       ENICPMD_FUNC_TRACE();
+       sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
+       data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
+       rq_sop = &enic->rq[sop_queue_idx];
+       rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
+       qinfo->mp = rq_sop->mp;
+       qinfo->scattered_rx = rq_sop->data_queue_enable;
+       qinfo->nb_desc = rq_sop->ring.desc_count;
+       if (qinfo->scattered_rx)
+               qinfo->nb_desc += rq_data->ring.desc_count;
+       conf = &qinfo->conf;
+       memset(conf, 0, sizeof(*conf));
+       conf->rx_free_thresh = rq_sop->rx_free_thresh;
+       conf->rx_drop_en = 1;
+       /*
+        * Except VLAN stripping (port setting), all the checksum offloads
+        * are always enabled.
+        */
+       conf->offloads = ENIC_RX_OFFLOAD_CAPA;
+       if (!enic->ig_vlan_strip_en)
+               conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
+       /* rx_thresh and other fields are not applicable for enic */
+}
+
+static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
+                                    __rte_unused uint16_t tx_queue_id,
+                                    struct rte_eth_txq_info *qinfo)
+{
+       struct enic *enic = pmd_priv(dev);
+
+       ENICPMD_FUNC_TRACE();
+       qinfo->nb_desc = enic->config.wq_desc_count;
+       memset(&qinfo->conf, 0, sizeof(qinfo->conf));
+       qinfo->conf.offloads = ENIC_TX_OFFLOAD_CAPA; /* not configurable */
+       /* tx_thresh, and all the other fields are not applicable for enic */
+}
+
+static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
+                                           uint16_t rx_queue_id)
+{
+       struct enic *enic = pmd_priv(eth_dev);
+
+       ENICPMD_FUNC_TRACE();
+       vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+       return 0;
+}
+
+static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
+                                            uint16_t rx_queue_id)
+{
+       struct enic *enic = pmd_priv(eth_dev);
+
+       ENICPMD_FUNC_TRACE();
+       vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
+       return 0;
+}
+
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure        = enicpmd_dev_configure,
        .dev_start            = enicpmd_dev_start,
@@ -703,7 +774,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_infos_get        = enicpmd_dev_info_get,
        .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set              = enicpmd_mtu_set,
-       .vlan_filter_set      = enicpmd_vlan_filter_set,
+       .vlan_filter_set      = NULL,
        .vlan_tpid_set        = NULL,
        .vlan_offload_set     = enicpmd_vlan_offload_set,
        .vlan_strip_queue_set = NULL,
@@ -717,6 +788,10 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .rx_descriptor_done   = NULL,
        .tx_queue_setup       = enicpmd_dev_tx_queue_setup,
        .tx_queue_release     = enicpmd_dev_tx_queue_release,
+       .rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
+       .rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
+       .rxq_info_get         = enicpmd_dev_rxq_info_get,
+       .txq_info_get         = enicpmd_dev_txq_info_get,
        .dev_led_on           = NULL,
        .dev_led_off          = NULL,
        .flow_ctrl_get        = NULL,