ethdev: query supported packet types
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 3aef196..bab0f7d 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -193,6 +193,7 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
        ENICPMD_FUNC_TRACE();
 
        enic_start_wq(enic, queue_idx);
+       eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
        return 0;
 }
@@ -208,6 +209,8 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
        ret = enic_stop_wq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping wq %d\n", queue_idx);
+       else
+               eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
        return ret;
 }
@@ -220,6 +223,7 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
        ENICPMD_FUNC_TRACE();
 
        enic_start_rq(enic, queue_idx);
+       eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
        return 0;
 }
@@ -235,6 +239,8 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
        ret = enic_stop_rq(enic, queue_idx);
        if (ret)
                dev_err(enic, "error in stopping rq %d\n", queue_idx);
+       else
+               eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
        return ret;
 }
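
The four hunks above make the driver record per-queue state in rte_eth_dev_data, so the generic queue start/stop helpers can tell whether a queue is already in the requested state. A minimal application-side sketch, assuming a hypothetical already-configured port_id/queue_id pair and minimal error handling:

#include <rte_ethdev.h>

/* Sketch: start and later stop one RX/TX queue pair on a configured port. */
static int
toggle_queue_pair(uint8_t port_id, uint16_t queue_id)
{
	int ret;

	ret = rte_eth_dev_rx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;
	ret = rte_eth_dev_tx_queue_start(port_id, queue_id);
	if (ret != 0)
		return ret;

	/* ... forward traffic ... */

	ret = rte_eth_dev_tx_queue_stop(port_id, queue_id);
	if (ret != 0)
		return ret;
	return rte_eth_dev_rx_queue_stop(port_id, queue_id);
}
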
@@ -249,7 +255,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
        uint16_t queue_idx,
        uint16_t nb_desc,
        unsigned int socket_id,
-       __rte_unused const struct rte_eth_rxconf *rx_conf,
+       const struct rte_eth_rxconf *rx_conf,
        struct rte_mempool *mp)
 {
        int ret;
@@ -264,6 +270,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
                return ret;
        }
 
+       enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
+       dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+                       enic->rq[queue_idx].rx_free_thresh);
+
        return enicpmd_dev_setup_intr(enic);
 }
 
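
Since rx_conf is no longer ignored, an application can tune the RX free threshold per queue; the default_rxconf advertised in enicpmd_dev_info_get() further down is what applies when no explicit configuration is passed. A hedged sketch, assuming a hypothetical caller that already owns port_id, nb_rxd and mbuf_pool:

#include <rte_ethdev.h>
#include <rte_mempool.h>

/* Sketch: set up RX queue 0 with an explicit rx_free_thresh. */
static int
setup_rxq(uint8_t port_id, uint16_t nb_rxd, struct rte_mempool *mbuf_pool)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;	/* start from the PMD default */
	rxconf.rx_free_thresh = 32;		/* hypothetical tuning value */

	return rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      &rxconf, mbuf_pool);
}
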
@@ -271,13 +281,14 @@ static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
        uint16_t vlan_id, int on)
 {
        struct enic *enic = pmd_priv(eth_dev);
+       int err;
 
        ENICPMD_FUNC_TRACE();
        if (on)
-               enic_add_vlan(enic, vlan_id);
+               err = enic_add_vlan(enic, vlan_id);
        else
-               enic_del_vlan(enic, vlan_id);
-       return 0;
+               err = enic_del_vlan(enic, vlan_id);
+       return err;
 }
 
 static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
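
enicpmd_vlan_filter_set() now propagates the status from enic_add_vlan()/enic_del_vlan() instead of always returning 0, so the return value of the generic VLAN filter API becomes meaningful for this PMD. A small caller-side sketch with a placeholder VLAN id:

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: add a VLAN filter and surface the error code the PMD now forwards. */
static int
add_vlan_or_report(uint8_t port_id, uint16_t vlan_id)
{
	int ret = rte_eth_dev_vlan_filter(port_id, vlan_id, 1 /* on */);

	if (ret != 0)
		printf("vlan %u filter add failed on port %u: %d\n",
		       vlan_id, port_id, ret);
	return ret;
}
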
@@ -422,6 +433,22 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
                DEV_TX_OFFLOAD_IPV4_CKSUM  |
                DEV_TX_OFFLOAD_UDP_CKSUM   |
                DEV_TX_OFFLOAD_TCP_CKSUM;
+       device_info->default_rxconf = (struct rte_eth_rxconf) {
+               .rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
+       };
+}
+
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+       static const uint32_t ptypes[] = {
+               RTE_PTYPE_L3_IPV4,
+               RTE_PTYPE_L3_IPV6,
+               RTE_PTYPE_UNKNOWN
+       };
+
+       if (dev->rx_pkt_burst == enic_recv_pkts)
+               return ptypes;
+       return NULL;
 }
 
 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
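
enicpmd_dev_supported_ptypes_get() backs the new rte_eth_dev_get_supported_ptypes() ethdev call this series introduces, and only advertises packet types when the default enic_recv_pkts handler is in use. A hedged usage sketch for the application side:

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

/* Sketch: query which L3 packet types the port's RX path can recognize.
 * For enic with enic_recv_pkts this reports IPv4 and IPv6.
 */
static void
print_l3_ptypes(uint8_t port_id)
{
	uint32_t ptypes[8];
	int i, num;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
					       ptypes, 8);
	for (i = 0; i < num && i < 8; i++)
		printf("port %u supports ptype 0x%x\n", port_id, ptypes[i]);
}
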
@@ -488,21 +515,26 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        unsigned int seg_len;
        unsigned int inc_len;
        unsigned int nb_segs;
-       struct rte_mbuf *tx_pkt;
+       struct rte_mbuf *tx_pkt, *next_tx_pkt;
        struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
        struct enic *enic = vnic_dev_priv(wq->vdev);
        unsigned short vlan_id;
        unsigned short ol_flags;
+       uint8_t last_seg, eop;
 
        for (index = 0; index < nb_pkts; index++) {
                tx_pkt = *tx_pkts++;
                inc_len = 0;
                nb_segs = tx_pkt->nb_segs;
                if (nb_segs > vnic_wq_desc_avail(wq)) {
+                       if (index > 0)
+                               enic_post_wq_index(wq);
+
                        /* wq cleanup and try again */
                        if (!enic_cleanup_wq(enic, wq) ||
-                               (nb_segs > vnic_wq_desc_avail(wq)))
+                               (nb_segs > vnic_wq_desc_avail(wq))) {
                                return index;
+                       }
                }
                pkt_len = tx_pkt->pkt_len;
                vlan_id = tx_pkt->vlan_tci;
@@ -510,14 +542,15 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                for (frags = 0; inc_len < pkt_len; frags++) {
                        if (!tx_pkt)
                                break;
+                       next_tx_pkt = tx_pkt->next;
                        seg_len = tx_pkt->data_len;
                        inc_len += seg_len;
-                       if (enic_send_pkt(enic, wq, tx_pkt,
-                                   (unsigned short)seg_len, !frags,
-                                   (pkt_len == inc_len), ol_flags, vlan_id)) {
-                               break;
-                       }
-                       tx_pkt = tx_pkt->next;
+                       eop = (pkt_len == inc_len) || (!next_tx_pkt);
+                       last_seg = eop &&
+                               (index == ((unsigned int)nb_pkts - 1));
+                       enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
+                                     !frags, eop, last_seg, ol_flags, vlan_id);
+                       tx_pkt = next_tx_pkt;
                }
        }
 
@@ -525,18 +558,6 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return index;
 }
 
-static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-       uint16_t nb_pkts)
-{
-       struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
-       unsigned int work_done;
-
-       if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
-               dev_err(enic, "error in enicpmd poll\n");
-
-       return work_done;
-}
-
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .dev_configure        = enicpmd_dev_configure,
        .dev_start            = enicpmd_dev_start,
@@ -553,6 +574,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .stats_reset          = enicpmd_dev_stats_reset,
        .queue_stats_mapping_set = NULL,
        .dev_infos_get        = enicpmd_dev_info_get,
+       .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
        .mtu_set              = NULL,
        .vlan_filter_set      = enicpmd_vlan_filter_set,
        .vlan_tpid_set        = NULL,
@@ -575,7 +597,6 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
        .priority_flow_ctrl_set = NULL,
        .mac_addr_add         = enicpmd_add_mac_addr,
        .mac_addr_remove      = enicpmd_remove_mac_addr,
-       .fdir_set_masks               = NULL,
        .filter_ctrl          = enicpmd_dev_filter_ctrl,
 };
 
@@ -594,10 +615,11 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
        enic->port_id = eth_dev->data->port_id;
        enic->rte_dev = eth_dev;
        eth_dev->dev_ops = &enicpmd_eth_dev_ops;
-       eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
+       eth_dev->rx_pkt_burst = &enic_recv_pkts;
        eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
 
        pdev = eth_dev->pci_dev;
+       rte_eth_copy_pci_info(eth_dev, pdev);
        enic->pdev = pdev;
        addr = &pdev->addr;
 
@@ -622,8 +644,8 @@ static struct eth_driver rte_enic_pmd = {
  * Register as the [Poll Mode] Driver of Cisco ENIC device.
  */
 static int
-rte_enic_pmd_init(const char *name __rte_unused,
-       const char *params __rte_unused)
+rte_enic_pmd_init(__rte_unused const char *name,
+        __rte_unused const char *params)
 {
        ENICPMD_FUNC_TRACE();