X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Fpfe%2Fpfe_ethdev.c;h=b1de866d34f03c64460161b58a031ea09d525f3d;hb=611faa5f46cc67449f272e14450fc6a0a275767d;hp=ac1e608c8ac68c0fedd6fa366ea033ecf19dad3f;hpb=5253fe372e4f6141d5675b3b29b249ffdee962d3;p=dpdk.git

diff --git a/drivers/net/pfe/pfe_ethdev.c b/drivers/net/pfe/pfe_ethdev.c
index ac1e608c8a..b1de866d34 100644
--- a/drivers/net/pfe/pfe_ethdev.c
+++ b/drivers/net/pfe/pfe_ethdev.c
@@ -1,22 +1,36 @@
 /* SPDX-License-Identifier: BSD-3-Clause
- * Copyright 2019 NXP
+ * Copyright 2018-2019 NXP
  */
 
+#include <sys/ioctl.h>
+#include <sys/epoll.h>
 #include <rte_kvargs.h>
 #include <rte_ethdev_vdev.h>
 #include <rte_bus_vdev.h>
+#include <rte_ether.h>
 #include <dpaa_of.h>
 
 #include "pfe_logs.h"
 #include "pfe_mod.h"
 
-#define PFE_MAX_MACS 1 /*we can support upto 4 MACs per IF*/
+#define PFE_MAX_MACS 1 /* we can support up to 4 MACs per IF */
 #define PFE_VDEV_GEM_ID_ARG	"intf"
 
 struct pfe_vdev_init_params {
 	int8_t	gem_id;
 };
 static struct pfe *g_pfe;
+/* Supported Rx offloads */
+static uint64_t dev_rx_offloads_sup =
+		DEV_RX_OFFLOAD_IPV4_CKSUM |
+		DEV_RX_OFFLOAD_UDP_CKSUM |
+		DEV_RX_OFFLOAD_TCP_CKSUM;
+
+/* Supported Tx offloads */
+static uint64_t dev_tx_offloads_sup =
+		DEV_TX_OFFLOAD_IPV4_CKSUM |
+		DEV_TX_OFFLOAD_UDP_CKSUM |
+		DEV_TX_OFFLOAD_TCP_CKSUM;
 
 /* TODO: make pfe_svr a runtime option.
  * Driver should be able to get the SVR
@@ -71,6 +85,262 @@ pfe_soc_version_get(void)
 	fclose(svr_file);
 }
 
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+	gpi_enable(priv->GPI_baseaddr);
+	gemac_enable(priv->EMAC_baseaddr);
+
+	return 0;
+}
+
+static void
+pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+		  __rte_unused from_tx, __rte_unused int n_desc)
+{
+	struct rte_mbuf *mbuf;
+	unsigned int flags;
+
+	/* Clean HIF and client queue */
+	while ((mbuf = hif_lib_tx_get_next_complete(&priv->client,
+						    tx_q_num, &flags,
+						    HIF_TX_DESC_NT))) {
+		if (mbuf) {
+			mbuf->next = NULL;
+			mbuf->nb_segs = 1;
+			rte_pktmbuf_free(mbuf);
+		}
+	}
+}
+
+
+static void
+pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+	unsigned int ii;
+
+	for (ii = 0; ii < emac_txq_cnt; ii++)
+		pfe_eth_flush_txQ(priv, ii, 0, 0);
+}
+
+static int
+pfe_eth_event_handler(void *data, int event, __rte_unused int qno)
+{
+	struct pfe_eth_priv_s *priv = data;
+
+	switch (event) {
+	case EVENT_TXDONE_IND:
+		pfe_eth_flush_tx(priv);
+		hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+		break;
+	case EVENT_HIGH_RX_WM:
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static uint16_t
+pfe_recv_pkts_on_intr(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_rx_queue *queue = rxq;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct epoll_event epoll_ev;
+	uint64_t ticks = 1; /* 1 msec */
+	int ret;
+	int have_something, work_done;
+
+#define RESET_STATUS (HIF_INT | HIF_RXPKT_INT)
+
+	/* TODO: can we remove this cleanup from here? */
+	pfe_tx_do_cleanup(priv->pfe);
+	have_something = pfe_hif_rx_process(priv->pfe, nb_pkts);
+	work_done = hif_lib_receive_pkt(rxq, priv->pfe->hif.shm->pool,
+					rx_pkts, nb_pkts);
+
+	if (!have_something || !work_done) {
+		writel(RESET_STATUS, HIF_INT_SRC);
+		writel(readl(HIF_INT_ENABLE) | HIF_RXPKT_INT, HIF_INT_ENABLE);
+		ret = epoll_wait(priv->pfe->hif.epoll_fd, &epoll_ev, 1, ticks);
+		if (ret < 0 && errno != EINTR)
+			PFE_PMD_ERR("epoll_wait failed with %d\n", errno);
+	}
+
+	return work_done;
+}
+
+static uint16_t
+pfe_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_rx_queue *queue = rxq;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct rte_mempool *pool;
+
+	/* TODO: can we remove this cleanup from here? */
+	pfe_tx_do_cleanup(priv->pfe);
+	pfe_hif_rx_process(priv->pfe, nb_pkts);
+	pool = priv->pfe->hif.shm->pool;
+
+	return hif_lib_receive_pkt(rxq, pool, rx_pkts, nb_pkts);
+}
+
+static uint16_t
+pfe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
+{
+	struct hif_client_tx_queue *queue = tx_queue;
+	struct pfe_eth_priv_s *priv = queue->priv;
+	struct rte_eth_stats *stats = &priv->stats;
+	int i;
+
+	for (i = 0; i < nb_pkts; i++) {
+		if (tx_pkts[i]->nb_segs > 1) {
+			struct rte_mbuf *mbuf;
+			int j;
+
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+				tx_pkts[i]->data_len, 0x0, HIF_FIRST_BUFFER,
+				tx_pkts[i]);
+
+			mbuf = tx_pkts[i]->next;
+			for (j = 0; j < (tx_pkts[i]->nb_segs - 2); j++) {
+				hif_lib_xmit_pkt(&priv->client,
+					queue->queue_id,
+					(void *)(size_t)rte_pktmbuf_iova(mbuf),
+					mbuf->buf_addr + mbuf->data_off,
+					mbuf->data_len,
+					0x0, 0x0, mbuf);
+				mbuf = mbuf->next;
+			}
+
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+				(void *)(size_t)rte_pktmbuf_iova(mbuf),
+				mbuf->buf_addr + mbuf->data_off,
+				mbuf->data_len,
+				0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+				mbuf);
+		} else {
+			hif_lib_xmit_pkt(&priv->client, queue->queue_id,
+				(void *)(size_t)rte_pktmbuf_iova(tx_pkts[i]),
+				tx_pkts[i]->buf_addr + tx_pkts[i]->data_off,
+				tx_pkts[i]->pkt_len, 0 /*ctrl*/,
+				HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+				HIF_DATA_VALID,
+				tx_pkts[i]);
+		}
+		stats->obytes += tx_pkts[i]->pkt_len;
+		hif_tx_dma_start();
+	}
+	stats->opackets += nb_pkts;
+	pfe_tx_do_cleanup(priv->pfe);
+
+	return nb_pkts;
+}
+
+static uint16_t
+pfe_dummy_xmit_pkts(__rte_unused void *tx_queue,
+		__rte_unused struct rte_mbuf **tx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static uint16_t
+pfe_dummy_recv_pkts(__rte_unused void *rxq,
+		__rte_unused struct rte_mbuf **rx_pkts,
+		__rte_unused uint16_t nb_pkts)
+{
+	return 0;
+}
+
+static int
+pfe_eth_open(struct rte_eth_dev *dev)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+	struct hif_client_s *client;
+	struct hif_shm *hif_shm;
+	int rc;
+
+	/* Register client driver with HIF */
+	client = &priv->client;
+
+	if (client->pfe) {
+		hif_shm = client->pfe->hif.shm;
+		/* TODO: remove this if block once the proper cleanup is
+		 * added in eth_close
+		 */
+		if (!test_bit(PFE_CL_GEM0 + priv->id,
+			      &hif_shm->g_client_status[0])) {
+			/* Register client driver with HIF */
+			memset(client, 0, sizeof(*client));
+			client->id = PFE_CL_GEM0 + priv->id;
+			client->tx_qn = emac_txq_cnt;
+			client->rx_qn = EMAC_RXQ_CNT;
+			client->priv = priv;
+			client->pfe = priv->pfe;
+			client->port_id = dev->data->port_id;
+			client->event_handler = pfe_eth_event_handler;
+
+			client->tx_qsize = EMAC_TXQ_DEPTH;
+			client->rx_qsize = EMAC_RXQ_DEPTH;
+
+			rc = hif_lib_client_register(client);
+			if (rc) {
+				PFE_PMD_ERR("hif_lib_client_register(%d)"
+					    " failed", client->id);
+				goto err0;
+			}
+		} else {
+			/* Free any packets still queued from an earlier run */
+			int ret = 0;
+			struct rte_mbuf *rx_pkts[32];
+			/* TODO multiqueue support */
+			ret = hif_lib_receive_pkt(&client->rx_q[0],
+						  hif_shm->pool, rx_pkts, 32);
+			while (ret) {
+				int i;
+				for (i = 0; i < ret; i++)
+					rte_pktmbuf_free(rx_pkts[i]);
+				ret = hif_lib_receive_pkt(&client->rx_q[0],
+							  hif_shm->pool,
+							  rx_pkts, 32);
+			}
+		}
+	} else {
+		/* Register client driver with HIF */
+		memset(client, 0, sizeof(*client));
+		client->id = PFE_CL_GEM0 + priv->id;
+		client->tx_qn = emac_txq_cnt;
+		client->rx_qn = EMAC_RXQ_CNT;
+		client->priv = priv;
+		client->pfe = priv->pfe;
+		client->port_id = dev->data->port_id;
+		client->event_handler = pfe_eth_event_handler;
+
+		client->tx_qsize = EMAC_TXQ_DEPTH;
+		client->rx_qsize = EMAC_RXQ_DEPTH;
+
+		rc = hif_lib_client_register(client);
+		if (rc) {
+			PFE_PMD_ERR("hif_lib_client_register(%d) failed",
+				    client->id);
+			goto err0;
+		}
+	}
+	rc = pfe_eth_start(priv);
+	dev->rx_pkt_burst = &pfe_recv_pkts;
+	dev->tx_pkt_burst = &pfe_xmit_pkts;
+	/* Switch to the interrupt-backed Rx path if requested via the
+	 * environment.
+	 */
+	if (getenv("PFE_INTR_SUPPORT")) {
+		dev->rx_pkt_burst = &pfe_recv_pkts_on_intr;
+		PFE_PMD_INFO("PFE INTERRUPT Mode enabled");
+	}
+
+
+err0:
+	return rc;
+}
+
 static int
 pfe_eth_open_cdev(struct pfe_eth_priv_s *priv)
 {
@@ -105,19 +375,412 @@ pfe_eth_close_cdev(struct pfe_eth_priv_s *priv)
 	}
 }
 
+static void
+pfe_eth_stop(struct rte_eth_dev *dev/*, int wake*/)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	gemac_disable(priv->EMAC_baseaddr);
+	gpi_disable(priv->GPI_baseaddr);
+
+	dev->rx_pkt_burst = &pfe_dummy_recv_pkts;
+	dev->tx_pkt_burst = &pfe_dummy_xmit_pkts;
+}
+
 static void
 pfe_eth_exit(struct rte_eth_dev *dev, struct pfe *pfe)
 {
 	PMD_INIT_FUNC_TRACE();
 
+	pfe_eth_stop(dev);
 	/* Close the device file for link status */
 	pfe_eth_close_cdev(dev->data->dev_private);
 
-	rte_free(dev->data->mac_addrs);
 	rte_eth_dev_release_port(dev);
 	pfe->nb_devs--;
 }
 
+static void
+pfe_eth_close(struct rte_eth_dev *dev)
+{
+	if (!dev)
+		return;
+
+	if (!g_pfe)
+		return;
+
+	pfe_eth_exit(dev, g_pfe);
+
+	if (g_pfe->nb_devs == 0) {
+		pfe_hif_exit(g_pfe);
+		pfe_hif_lib_exit(g_pfe);
+		rte_free(g_pfe);
+		g_pfe = NULL;
+	}
+}
+
+static int
+pfe_eth_configure(struct rte_eth_dev *dev __rte_unused)
+{
+	return 0;
+}
+
+static int
+pfe_eth_info(struct rte_eth_dev *dev,
+		struct rte_eth_dev_info *dev_info)
+{
+	struct pfe_eth_priv_s *internals = dev->data->dev_private;
+
+	dev_info->if_index = internals->id;
+	dev_info->max_mac_addrs = PFE_MAX_MACS;
+	dev_info->max_rx_queues = dev->data->nb_rx_queues;
+	dev_info->max_tx_queues = dev->data->nb_tx_queues;
+	dev_info->min_rx_bufsize = HIF_RX_PKT_MIN_SIZE;
+	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+	dev_info->rx_offload_capa = dev_rx_offloads_sup;
+	dev_info->tx_offload_capa = dev_tx_offloads_sup;
+	if (pfe_svr == SVR_LS1012A_REV1) {
+		dev_info->max_rx_pktlen = MAX_MTU_ON_REV1 + PFE_ETH_OVERHEAD;
+		dev_info->max_mtu = MAX_MTU_ON_REV1;
+	} else {
+		dev_info->max_rx_pktlen = JUMBO_FRAME_SIZE;
+		dev_info->max_mtu = JUMBO_FRAME_SIZE - PFE_ETH_OVERHEAD;
+	}
+
+	return 0;
+}
+
+/* Only the mb_pool given on the first call of this API will be used
+ * for the whole system; nb_rx_desc and rx_conf are unused params.
+ */
+static int
+pfe_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+		__rte_unused uint16_t nb_rx_desc,
+		__rte_unused unsigned int socket_id,
+		__rte_unused const struct rte_eth_rxconf *rx_conf,
+		struct rte_mempool *mb_pool)
+{
+	int rc = 0;
+	struct pfe *pfe;
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	pfe = priv->pfe;
+
+	if (queue_idx >= EMAC_RXQ_CNT) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, EMAC_RXQ_CNT);
+		return -1;
+	}
+
+	if (!pfe->hif.setuped) {
+		rc = pfe_hif_shm_init(pfe->hif.shm, mb_pool);
+		if (rc) {
+			PFE_PMD_ERR("Could not allocate buffer descriptors");
+			return -1;
+		}
+
+		pfe->hif.shm->pool = mb_pool;
+		if (pfe_hif_init_buffers(&pfe->hif)) {
+			PFE_PMD_ERR("Could not initialize buffer descriptors");
+			return -1;
+		}
+		hif_init();
+		hif_rx_enable();
+		hif_tx_enable();
+		pfe->hif.setuped = 1;
+	}
+	dev->data->rx_queues[queue_idx] = &priv->client.rx_q[queue_idx];
+	priv->client.rx_q[queue_idx].queue_id = queue_idx;
+
+	return 0;
+}
+
+static void
+pfe_rx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static void
+pfe_tx_queue_release(void *q __rte_unused)
+{
+	PMD_INIT_FUNC_TRACE();
+}
+
+static int
+pfe_tx_queue_setup(struct rte_eth_dev *dev,
+		   uint16_t queue_idx,
+		   __rte_unused uint16_t nb_desc,
+		   __rte_unused unsigned int socket_id,
+		   __rte_unused const struct rte_eth_txconf *tx_conf)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	if (queue_idx >= emac_txq_cnt) {
+		PFE_PMD_ERR("Invalid queue idx = %d, Max queues = %d",
+			    queue_idx, emac_txq_cnt);
+		return -1;
+	}
+	dev->data->tx_queues[queue_idx] = &priv->client.tx_q[queue_idx];
+	priv->client.tx_q[queue_idx].queue_id = queue_idx;
+	return 0;
+}
+
+static const uint32_t *
+pfe_supported_ptypes_get(struct rte_eth_dev *dev)
+{
+	static const uint32_t ptypes[] = {
+		/* TODO: add more types */
+		RTE_PTYPE_L2_ETHER,
+		RTE_PTYPE_L3_IPV4,
+		RTE_PTYPE_L3_IPV4_EXT,
+		RTE_PTYPE_L3_IPV6,
+		RTE_PTYPE_L3_IPV6_EXT,
+		RTE_PTYPE_L4_TCP,
+		RTE_PTYPE_L4_UDP,
+		RTE_PTYPE_L4_SCTP
+	};
+
+	if (dev->rx_pkt_burst == pfe_recv_pkts ||
+	    dev->rx_pkt_burst == pfe_recv_pkts_on_intr)
+		return ptypes;
+	return NULL;
+}
+
+static inline int
+pfe_eth_atomic_read_link_status(struct rte_eth_dev *dev,
+				struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = link;
+	struct rte_eth_link *src = &dev->data->dev_link;
+
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+				*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+static inline int
+pfe_eth_atomic_write_link_status(struct rte_eth_dev *dev,
+				 struct rte_eth_link *link)
+{
+	struct rte_eth_link *dst = &dev->data->dev_link;
+	struct rte_eth_link *src = link;
+
+	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
+				*(uint64_t *)src) == 0)
+		return -1;
+
+	return 0;
+}
+
+static int
+pfe_eth_link_update(struct rte_eth_dev *dev, int wait_to_complete __rte_unused)
+{
+	int ret, ioctl_cmd = 0;
+	struct pfe_eth_priv_s *priv;
+	struct rte_eth_link link, old;
+	unsigned int lstatus = 1;
+
+	if (dev == NULL) {
+		PFE_PMD_ERR("Invalid device in link_update.\n");
+		return 0;
+	}
+
+	priv = dev->data->dev_private;
+	memset(&old, 0, sizeof(old));
+	memset(&link, 0, sizeof(struct rte_eth_link));
+
+	pfe_eth_atomic_read_link_status(dev, &old);
+
+	/* Read the link status from the PFE CDEV, if the file was
+	 * successfully opened.
+	 */
+	if (priv->link_fd != PFE_CDEV_INVALID_FD) {
+		if (priv->id == 0)
+			ioctl_cmd = PFE_CDEV_ETH0_STATE_GET;
+		if (priv->id == 1)
+			ioctl_cmd = PFE_CDEV_ETH1_STATE_GET;
+
+		ret = ioctl(priv->link_fd, ioctl_cmd, &lstatus);
+		if (ret != 0) {
+			PFE_PMD_ERR("Unable to fetch link status (ioctl)\n");
+			/* use dummy link value */
+			link.link_status = 1;
+		}
+		PFE_PMD_DEBUG("Fetched link state (%d) for dev %d.\n",
+			      lstatus, priv->id);
+	}
+
+	if (old.link_status == lstatus) {
+		/* no change in status */
+		PFE_PMD_DEBUG("No change in link status; not updating.\n");
+		return -1;
+	}
+
+	link.link_status = lstatus;
+	link.link_speed = ETH_SPEED_NUM_1G;
+	link.link_duplex = ETH_LINK_FULL_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
+
+	pfe_eth_atomic_write_link_status(dev, &link);
+
+	PFE_PMD_INFO("Port (%d) link is %s\n", dev->data->port_id,
+		     link.link_status ? "up" : "down");
+
+	return 0;
+}
+
+static int
+pfe_promiscuous_enable(struct rte_eth_dev *dev)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	priv->promisc = 1;
+	dev->data->promiscuous = 1;
+	gemac_enable_copy_all(priv->EMAC_baseaddr);
+
+	return 0;
+}
+
+static int
+pfe_promiscuous_disable(struct rte_eth_dev *dev)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	priv->promisc = 0;
+	dev->data->promiscuous = 0;
+	gemac_disable_copy_all(priv->EMAC_baseaddr);
+
+	return 0;
+}
+
+static int
+pfe_allmulticast_enable(struct rte_eth_dev *dev)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+	struct pfe_mac_addr hash_addr; /* hash register structure */
+
+	/* Set the hash to rx all multicast frames */
+	hash_addr.bottom = 0xFFFFFFFF;
+	hash_addr.top = 0xFFFFFFFF;
+	gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+	dev->data->all_multicast = 1;
+
+	return 0;
+}
+
+static int
+pfe_link_down(struct rte_eth_dev *dev)
+{
+	pfe_eth_stop(dev);
+	return 0;
+}
+
+static int
+pfe_link_up(struct rte_eth_dev *dev)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+
+	pfe_eth_start(priv);
+	return 0;
+}
+
+static int
+pfe_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
+{
+	int ret;
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+	uint16_t frame_size = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
+
+	/* TODO: support VLAN */
+	ret = gemac_set_rx(priv->EMAC_baseaddr, frame_size);
+	if (!ret)
+		dev->data->mtu = mtu;
+
+	return ret;
+}
+
+/* Convert a byte-array MAC address into the PFE MAC register layout */
+static int
+pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
+			   struct pfe_mac_addr *enet_addr)
+{
+	if (!enet_byte_addr || !enet_addr) {
+		return -1;
+	} else {
+		enet_addr->bottom = enet_byte_addr[0] |
+			(enet_byte_addr[1] << 8) |
+			(enet_byte_addr[2] << 16) |
+			(enet_byte_addr[3] << 24);
+		enet_addr->top = enet_byte_addr[4] |
+			(enet_byte_addr[5] << 8);
+		return 0;
+	}
+}
+
+static int
+pfe_dev_set_mac_addr(struct rte_eth_dev *dev,
+		     struct rte_ether_addr *addr)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+	struct pfe_mac_addr spec_addr;
+	int ret;
+
+	ret = pfe_eth_enet_addr_byte_mac(addr->addr_bytes, &spec_addr);
+	if (ret)
+		return ret;
+
+	gemac_set_laddrN(priv->EMAC_baseaddr,
+			 (struct pfe_mac_addr *)&spec_addr, 1);
+	rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+	return 0;
+}
+
+static int
+pfe_stats_get(struct rte_eth_dev *dev,
+	      struct rte_eth_stats *stats)
+{
+	struct pfe_eth_priv_s *priv = dev->data->dev_private;
+	struct rte_eth_stats *eth_stats = &priv->stats;
+
+	if (stats == NULL)
+		return -1;
+
+	memset(stats, 0, sizeof(struct rte_eth_stats));
+
+	stats->ipackets = eth_stats->ipackets;
+	stats->ibytes = eth_stats->ibytes;
+	stats->opackets = eth_stats->opackets;
+	stats->obytes = eth_stats->obytes;
+
+	return 0;
+}
+
+static const struct eth_dev_ops ops = {
+	.dev_start = pfe_eth_open,
+	.dev_stop = pfe_eth_stop,
+	.dev_close = pfe_eth_close,
+	.dev_configure = pfe_eth_configure,
+	.dev_infos_get = pfe_eth_info,
+	.rx_queue_setup = pfe_rx_queue_setup,
+	.rx_queue_release = pfe_rx_queue_release,
+	.tx_queue_setup = pfe_tx_queue_setup,
+	.tx_queue_release = pfe_tx_queue_release,
+	.dev_supported_ptypes_get = pfe_supported_ptypes_get,
+	.link_update = pfe_eth_link_update,
+	.promiscuous_enable = pfe_promiscuous_enable,
+	.promiscuous_disable = pfe_promiscuous_disable,
+	.allmulticast_enable = pfe_allmulticast_enable,
+	.dev_set_link_down = pfe_link_down,
+	.dev_set_link_up = pfe_link_up,
+	.mtu_set = pfe_mtu_set,
+	.mac_addr_set = pfe_dev_set_mac_addr,
+	.stats_get = pfe_stats_get,
+};
+
 static int
 pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
 {
@@ -125,6 +788,7 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
 	struct pfe_eth_priv_s *priv = NULL;
 	struct ls1012a_eth_platform_data *einfo;
 	struct ls1012a_pfe_platform_data *pfe_info;
+	struct rte_ether_addr addr;
 	int err;
 
 	eth_dev = rte_eth_vdev_allocate(vdev, sizeof(*priv));
@@ -177,7 +841,15 @@ pfe_eth_init(struct rte_vdev_device *vdev, struct pfe *pfe, int id)
 		goto err0;
 	}
 
+	memcpy(addr.addr_bytes, priv->einfo->mac_addr,
+	       ETH_ALEN);
+
+	pfe_dev_set_mac_addr(eth_dev, &addr);
+	rte_ether_addr_copy(&addr, &eth_dev->data->mac_addrs[0]);
+
+	eth_dev->data->mtu = 1500;
 	eth_dev->dev_ops = &ops;
+	pfe_eth_stop(eth_dev);
 	pfe_gemac_init(priv);
 
 	eth_dev->data->nb_rx_queues = 1;
@@ -317,7 +989,7 @@ pmd_pfe_probe(struct rte_vdev_device *vdev)
 	if (rc < 0)
 		return -EINVAL;
 
-	RTE_LOG(INFO, PMD, "Initializing pmd_pfe for %s Given gem-id %d\n",
+	PFE_PMD_LOG(INFO, "Initializing pmd_pfe for %s, given gem-id %d",
 		name, init_params.gem_id);
 
 	if (g_pfe) {
@@ -445,7 +1117,7 @@ eth_init:
 	else
 		gem_id = init_params.gem_id;
 
-	RTE_LOG(INFO, PMD, "Init pmd_pfe for %s gem-id %d(given =%d)\n",
+	PFE_PMD_LOG(INFO, "Init pmd_pfe for %s gem-id %d (given = %d)",
 		name, gem_id, init_params.gem_id);
 
 	rc = pfe_eth_init(vdev, g_pfe, gem_id);
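
For context beyond the patch itself, the sketch below shows how an application would drive this PMD end to end. It is a minimal illustration, not part of the patch, and it makes assumptions: the vdev name "net_pfe0" with the "intf" kvarg (PFE_VDEV_GEM_ID_ARG above) is assumed, e.g. started with --vdev=net_pfe0,intf=0; port id 0, the pool and ring sizes, and the echo loop are arbitrary choices; error handling is trimmed. Two behaviors from the patch are worth noting here: pfe_rx_queue_setup() ignores nb_rx_desc/rx_conf and wires the whole HIF to the mempool passed on the first call, and setting PFE_INTR_SUPPORT in the environment before rte_eth_dev_start() makes pfe_eth_open() select the interrupt-backed Rx burst (pfe_recv_pkts_on_intr) instead of pure polling.

#include <rte_eal.h>
#include <rte_ethdev.h>
#include <rte_lcore.h>
#include <rte_mbuf.h>

int
main(int argc, char **argv)
{
	/* Run as e.g.: ./app --vdev=net_pfe0,intf=0 (vdev name assumed) */
	struct rte_eth_conf port_conf = { 0 };
	struct rte_mbuf *bufs[32];
	struct rte_mempool *mp;
	uint16_t port = 0, nb, sent;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* One pool for all Rx buffers; the PMD keeps the pool given on
	 * the first rx_queue_setup call for the whole HIF.
	 */
	mp = rte_pktmbuf_pool_create("pfe_rx_pool", 2048, 64, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;

	/* The driver exposes a single Rx/Tx queue pair (pfe_eth_init
	 * sets nb_rx_queues = nb_tx_queues = 1).
	 */
	if (rte_eth_dev_configure(port, 1, 1, &port_conf) < 0)
		return -1;
	if (rte_eth_rx_queue_setup(port, 0, 128, rte_socket_id(),
				   NULL, mp) < 0)
		return -1;
	if (rte_eth_tx_queue_setup(port, 0, 128, rte_socket_id(),
				   NULL) < 0)
		return -1;

	/* dev_start maps to pfe_eth_open(): HIF client registration and
	 * selection of pfe_recv_pkts/pfe_xmit_pkts as burst functions.
	 */
	if (rte_eth_dev_start(port) < 0)
		return -1;

	for (;;) {
		/* Poll queue 0 and echo whatever arrives back out. */
		nb = rte_eth_rx_burst(port, 0, bufs, 32);
		sent = nb ? rte_eth_tx_burst(port, 0, bufs, nb) : 0;
		while (sent < nb)
			rte_pktmbuf_free(bufs[sent++]); /* drop unsent */
	}
	return 0;
}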