diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 69ad01bf57..6bea940501 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -31,7 +31,6 @@
  * POSSIBILITY OF SUCH DAMAGE.
  *
  */
-#ident "$Id$"
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -175,6 +174,13 @@ static int enicpmd_dev_tx_queue_setup(struct rte_eth_dev *eth_dev,
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
+	if (queue_idx >= ENIC_WQ_MAX) {
+		dev_err(enic,
+			"Max number of TX queues exceeded. Max is %d\n",
+			ENIC_WQ_MAX);
+		return -EINVAL;
+	}
+
 	eth_dev->data->tx_queues[queue_idx] = (void *)&enic->wq[queue_idx];
 
 	ret = enic_alloc_wq(enic, queue_idx, socket_id, nb_desc);
@@ -194,6 +200,7 @@ static int enicpmd_dev_tx_queue_start(struct rte_eth_dev *eth_dev,
 	ENICPMD_FUNC_TRACE();
 
 	enic_start_wq(enic, queue_idx);
+	eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
 }
@@ -209,6 +216,8 @@ static int enicpmd_dev_tx_queue_stop(struct rte_eth_dev *eth_dev,
 	ret = enic_stop_wq(enic, queue_idx);
 	if (ret)
 		dev_err(enic, "error in stopping wq %d\n", queue_idx);
+	else
+		eth_dev->data->tx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return ret;
 }
@@ -221,6 +230,7 @@ static int enicpmd_dev_rx_queue_start(struct rte_eth_dev *eth_dev,
 	ENICPMD_FUNC_TRACE();
 
 	enic_start_rq(enic, queue_idx);
+	eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STARTED;
 
 	return 0;
 }
@@ -236,6 +246,8 @@ static int enicpmd_dev_rx_queue_stop(struct rte_eth_dev *eth_dev,
 	ret = enic_stop_rq(enic, queue_idx);
 	if (ret)
 		dev_err(enic, "error in stopping rq %d\n", queue_idx);
+	else
+		eth_dev->data->rx_queue_state[queue_idx] = RTE_ETH_QUEUE_STATE_STOPPED;
 
 	return ret;
 }
@@ -250,13 +262,20 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	uint16_t queue_idx,
 	uint16_t nb_desc,
 	unsigned int socket_id,
-	__rte_unused const struct rte_eth_rxconf *rx_conf,
+	const struct rte_eth_rxconf *rx_conf,
 	struct rte_mempool *mp)
 {
 	int ret;
 	struct enic *enic = pmd_priv(eth_dev);
 
 	ENICPMD_FUNC_TRACE();
+	if (queue_idx >= ENIC_RQ_MAX) {
+		dev_err(enic,
+			"Max number of RX queues exceeded. Max is %d\n",
+			ENIC_RQ_MAX);
+		return -EINVAL;
+	}
+
 	eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx];
 
 	ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc);
@@ -265,6 +284,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 		return ret;
 	}
 
+	enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
+	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+		enic->rq[queue_idx].rx_free_thresh);
+
 	return enicpmd_dev_setup_intr(enic);
 }
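With rx_conf no longer ignored, an application can tune rx_free_thresh (how many consumed RX descriptors accumulate before the PMD posts them back to the NIC) per queue. A minimal caller-side sketch, assuming an already-probed port behind a hypothetical `port_id` and a mempool `mb_pool` created elsewhere; 32 is an arbitrary example value:

	#include <stdio.h>
	#include <rte_ethdev.h>

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;
	int ret;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;	/* ENIC_DEFAULT_RX_FREE_THRESH */
	rxconf.rx_free_thresh = 32;		/* example override */

	ret = rte_eth_rx_queue_setup(port_id, 0 /* queue */, 512 /* descs */,
				     rte_eth_dev_socket_id(port_id),
				     &rxconf, mb_pool);
	if (ret != 0)
		printf("rx queue setup failed: %d\n", ret);

Passing a NULL rx_conf instead keeps the default advertised by dev_infos_get, which this patch sets to ENIC_DEFAULT_RX_FREE_THRESH below.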
@@ -272,13 +295,14 @@ static int enicpmd_vlan_filter_set(struct rte_eth_dev *eth_dev,
 	uint16_t vlan_id, int on)
 {
 	struct enic *enic = pmd_priv(eth_dev);
+	int err;
 
 	ENICPMD_FUNC_TRACE();
 	if (on)
-		enic_add_vlan(enic, vlan_id);
+		err = enic_add_vlan(enic, vlan_id);
 	else
-		enic_del_vlan(enic, vlan_id);
-	return 0;
+		err = enic_del_vlan(enic, vlan_id);
+	return err;
 }
 
 static void enicpmd_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
@@ -423,6 +447,22 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 		DEV_TX_OFFLOAD_IPV4_CKSUM  |
 		DEV_TX_OFFLOAD_UDP_CKSUM   |
 		DEV_TX_OFFLOAD_TCP_CKSUM;
+	device_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
+	};
+}
+
+static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_UNKNOWN
+	};
+
+	if (dev->rx_pkt_burst == enic_recv_pkts)
+		return ptypes;
+	return NULL;
 }
 
 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
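The new dev_supported_ptypes_get hook backs the generic rte_eth_dev_get_supported_ptypes() API introduced in the same DPDK release. A caller-side sketch (hypothetical `port_id` again), which for this driver would report the two L3 types listed above:

	uint32_t ptypes[8];
	int i, num;

	num = rte_eth_dev_get_supported_ptypes(port_id, RTE_PTYPE_L3_MASK,
					       ptypes, RTE_DIM(ptypes));
	for (i = 0; i < num; i++)
		printf("supported ptype: 0x%08x\n", ptypes[i]);

Note the hook hands back the table only while rx_pkt_burst points at enic_recv_pkts, so the ethdev layer reports nothing if an alternative burst function is ever installed.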
@@ -483,42 +523,60 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui
 static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	uint16_t nb_pkts)
 {
-	unsigned int index;
+	uint16_t index;
 	unsigned int frags;
 	unsigned int pkt_len;
 	unsigned int seg_len;
 	unsigned int inc_len;
 	unsigned int nb_segs;
-	struct rte_mbuf *tx_pkt;
+	struct rte_mbuf *tx_pkt, *next_tx_pkt;
 	struct vnic_wq *wq = (struct vnic_wq *)tx_queue;
 	struct enic *enic = vnic_dev_priv(wq->vdev);
 	unsigned short vlan_id;
 	unsigned short ol_flags;
+	uint8_t last_seg, eop;
+	unsigned int host_tx_descs = 0;
 
 	for (index = 0; index < nb_pkts; index++) {
 		tx_pkt = *tx_pkts++;
 		inc_len = 0;
 		nb_segs = tx_pkt->nb_segs;
 		if (nb_segs > vnic_wq_desc_avail(wq)) {
+			if (index > 0)
+				enic_post_wq_index(wq);
+
 			/* wq cleanup and try again */
 			if (!enic_cleanup_wq(enic, wq) ||
-				(nb_segs > vnic_wq_desc_avail(wq)))
+				(nb_segs > vnic_wq_desc_avail(wq))) {
 				return index;
+			}
 		}
+
 		pkt_len = tx_pkt->pkt_len;
 		vlan_id = tx_pkt->vlan_tci;
 		ol_flags = tx_pkt->ol_flags;
 		for (frags = 0; inc_len < pkt_len; frags++) {
 			if (!tx_pkt)
 				break;
+			next_tx_pkt = tx_pkt->next;
 			seg_len = tx_pkt->data_len;
 			inc_len += seg_len;
-			if (enic_send_pkt(enic, wq, tx_pkt,
-				(unsigned short)seg_len, !frags,
-				(pkt_len == inc_len), ol_flags, vlan_id)) {
-				break;
+
+			host_tx_descs++;
+			last_seg = 0;
+			eop = 0;
+			if ((pkt_len == inc_len) || !next_tx_pkt) {
+				eop = 1;
+				/* post if last packet in batch or > thresh */
+				if ((index == (nb_pkts - 1)) ||
+				    (host_tx_descs > ENIC_TX_POST_THRESH)) {
+					last_seg = 1;
+					host_tx_descs = 0;
+				}
 			}
-			tx_pkt = tx_pkt->next;
+			enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len,
+				!frags, eop, last_seg, ol_flags, vlan_id);
+			tx_pkt = next_tx_pkt;
 		}
 	}
 
@@ -526,18 +584,6 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 	return index;
 }
 
-static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	uint16_t nb_pkts)
-{
-	struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
-	unsigned int work_done;
-
-	if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
-		dev_err(enic, "error in enicpmd poll\n");
-
-	return work_done;
-}
-
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.dev_configure          = enicpmd_dev_configure,
 	.dev_start              = enicpmd_dev_start,
@@ -554,6 +600,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.stats_reset            = enicpmd_dev_stats_reset,
 	.queue_stats_mapping_set = NULL,
 	.dev_infos_get          = enicpmd_dev_info_get,
+	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
 	.mtu_set                = NULL,
 	.vlan_filter_set        = enicpmd_vlan_filter_set,
 	.vlan_tpid_set          = NULL,
@@ -576,7 +623,6 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.priority_flow_ctrl_set = NULL,
 	.mac_addr_add           = enicpmd_add_mac_addr,
 	.mac_addr_remove        = enicpmd_remove_mac_addr,
-	.fdir_set_masks         = NULL,
 	.filter_ctrl            = enicpmd_dev_filter_ctrl,
 };
 
@@ -595,10 +641,11 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	enic->port_id = eth_dev->data->port_id;
 	enic->rte_dev = eth_dev;
 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
-	eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
+	eth_dev->rx_pkt_burst = &enic_recv_pkts;
 	eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
 
 	pdev = eth_dev->pci_dev;
+	rte_eth_copy_pci_info(eth_dev, pdev);
 	enic->pdev = pdev;
 	addr = &pdev->addr;
 
@@ -609,7 +656,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 }
 
 static struct eth_driver rte_enic_pmd = {
-	{
+	.pci_drv = {
 		.name = "rte_enic_pmd",
 		.id_table = pci_id_enic_map,
 		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
@@ -623,8 +670,8 @@ static struct eth_driver rte_enic_pmd = {
  * Register as the [Poll Mode] Driver of Cisco ENIC device.
  */
 static int
-rte_enic_pmd_init(const char *name __rte_unused,
-	const char *params __rte_unused)
+rte_enic_pmd_init(__rte_unused const char *name,
+	__rte_unused const char *params)
 {
 	ENICPMD_FUNC_TRACE();
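Since the reworked enicpmd_xmit_pkts returns as soon as the work queue runs out of descriptors (even after a cleanup attempt), the count returned by rte_eth_tx_burst() can be less than nb_pkts, and callers should retry or drop the unsent tail. A minimal sketch, with `port_id`, `pkts` and `nb_pkts` as placeholders for an initialized port and a prepared mbuf batch:

	uint16_t sent = 0;

	while (sent < nb_pkts) {
		uint16_t n = rte_eth_tx_burst(port_id, 0 /* queue */,
					      &pkts[sent], nb_pkts - sent);
		if (n == 0)
			break;	/* WQ still full; back off or free the rest */
		sent += n;
	}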