X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=drivers%2Fnet%2Focteontx%2Focteontx_ethdev.c;h=97b4298febe695749d3c64c5a5754b53eb190325;hb=5cbe184802aa;hp=ee06cd355466feb4b76d396780d1684c31a8ec21;hpb=5f19dee604ed5760d819743d1d364493ead2aae6;p=dpdk.git

diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index ee06cd3554..97b4298feb 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -24,6 +24,10 @@
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"
 
+struct evdev_priv_data {
+	OFFLOAD_FLAGS; /*Sequence should not be changed */
+} __rte_cache_aligned;
+
 struct octeontx_vdev_init_params {
 	uint8_t nr_port;
 };
@@ -46,9 +50,7 @@ int otx_net_logtype_mbox;
 int otx_net_logtype_init;
 int otx_net_logtype_driver;
 
-RTE_INIT(otx_net_init_log);
-static void
-otx_net_init_log(void)
+RTE_INIT(otx_net_init_log)
 {
 	otx_net_logtype_mbox = rte_log_register("pmd.net.octeontx.mbox");
 	if (otx_net_logtype_mbox >= 0)
@@ -144,7 +146,8 @@ octeontx_port_open(struct octeontx_nic *nic)
 	nic->mcast_mode = bgx_port_conf.mcast_mode;
 	nic->speed = bgx_port_conf.mode;
 
-	memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0], ETHER_ADDR_LEN);
+	memcpy(&nic->mac_addr[0], &bgx_port_conf.macaddr[0],
+		RTE_ETHER_ADDR_LEN);
 
 	octeontx_log_dbg("port opened %d", nic->port_id);
 	return res;
@@ -175,7 +178,7 @@ octeontx_port_stop(struct octeontx_nic *nic)
 	return octeontx_bgx_port_stop(nic->port_id);
 }
 
-static void
+static int
 octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
 {
 	struct rte_eth_dev *dev;
@@ -186,15 +189,19 @@ octeontx_port_promisc_set(struct octeontx_nic *nic, int en)
 	dev = nic->dev;
 
 	res = octeontx_bgx_port_promisc_set(nic->port_id, en);
-	if (res < 0)
+	if (res < 0) {
 		octeontx_log_err("failed to set promiscuous mode %d",
 				nic->port_id);
+		return res;
+	}
 
 	/* Set proper flag for the mode */
 	dev->data->promiscuous = (en != 0) ? 1 : 0;
 
 	octeontx_log_dbg("port %d : promiscuous mode %s",
			nic->port_id, en ? "set" : "unset");
"set" : "unset"); + + return 0; } static int @@ -225,12 +232,12 @@ octeontx_port_stats(struct octeontx_nic *nic, struct rte_eth_stats *stats) return 0; } -static void +static int octeontx_port_stats_clr(struct octeontx_nic *nic) { PMD_INIT_FUNC_TRACE(); - octeontx_bgx_port_stats_clr(nic->port_id); + return octeontx_bgx_port_stats_clr(nic->port_id); } static inline void @@ -254,6 +261,45 @@ devconf_set_default_sane_values(struct rte_event_dev_config *dev_conf, info->max_num_events; } +static uint16_t +octeontx_tx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + uint16_t flags = 0; + + if (!(nic->tx_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE)) + flags |= OCCTX_TX_OFFLOAD_MBUF_NOFF_F; + + if (nic->tx_offloads & DEV_TX_OFFLOAD_MULTI_SEGS) + flags |= OCCTX_TX_MULTI_SEG_F; + + return flags; +} + +static uint16_t +octeontx_rx_offload_flags(struct rte_eth_dev *eth_dev) +{ + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_eth_conf *conf = &data->dev_conf; + struct rte_eth_rxmode *rxmode = &conf->rxmode; + uint16_t flags = 0; + + if (rxmode->mq_mode == ETH_MQ_RX_RSS) + flags |= OCCTX_RX_OFFLOAD_RSS_F; + + if (nic->rx_offloads & DEV_RX_OFFLOAD_SCATTER) { + flags |= OCCTX_RX_MULTI_SEG_F; + eth_dev->data->scattered_rx = 1; + /* If scatter mode is enabled, TX should also be in multi + * seg mode, else memory leak will occur + */ + nic->tx_offloads |= DEV_TX_OFFLOAD_MULTI_SEGS; + } + + return flags; +} + static int octeontx_dev_configure(struct rte_eth_dev *dev) { @@ -262,8 +308,6 @@ octeontx_dev_configure(struct rte_eth_dev *dev) struct rte_eth_rxmode *rxmode = &conf->rxmode; struct rte_eth_txmode *txmode = &conf->txmode; struct octeontx_nic *nic = octeontx_pmd_priv(dev); - uint64_t configured_offloads; - uint64_t unsupported_offloads; int ret; PMD_INIT_FUNC_TRACE(); @@ -285,38 +329,9 @@ octeontx_dev_configure(struct rte_eth_dev *dev) return -EINVAL; } - configured_offloads = rxmode->offloads; - - if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) { - PMD_INIT_LOG(NOTICE, "can't disable hw crc strip"); - configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP; - } - - unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS; - - if (unsupported_offloads) { - PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. " - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - unsupported_offloads, configured_offloads, - (uint64_t)OCTEONTX_RX_OFFLOADS); - return -ENOTSUP; - } - - configured_offloads = txmode->offloads; - - if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) { + if (!(txmode->offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) { PMD_INIT_LOG(NOTICE, "cant disable lockfree tx"); - configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE; - } - - unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS; - - if (unsupported_offloads) { - PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported." 
- "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n", - unsupported_offloads, configured_offloads, - (uint64_t)OCTEONTX_TX_OFFLOADS); - return -ENOTSUP; + txmode->offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE; } if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { @@ -336,7 +351,7 @@ octeontx_dev_configure(struct rte_eth_dev *dev) nic->num_tx_queues = dev->data->nb_tx_queues; - ret = octeontx_pko_channel_open(nic->port_id * PKO_VF_NUM_DQ, + ret = octeontx_pko_channel_open(nic->pko_vfid * PKO_VF_NUM_DQ, nic->num_tx_queues, nic->base_ochan); if (ret) { @@ -349,6 +364,11 @@ octeontx_dev_configure(struct rte_eth_dev *dev) nic->pki.hash_enable = true; nic->pki.initialized = false; + nic->rx_offloads |= rxmode->offloads; + nic->tx_offloads |= txmode->offloads; + nic->rx_offload_flags |= octeontx_rx_offload_flags(dev); + nic->tx_offload_flags |= octeontx_tx_offload_flags(dev); + return 0; } @@ -378,21 +398,66 @@ octeontx_dev_close(struct rte_eth_dev *dev) rte_free(txq); } + + /* Free MAC address table */ + rte_free(dev->data->mac_addrs); + dev->data->mac_addrs = NULL; + + dev->tx_pkt_burst = NULL; + dev->rx_pkt_burst = NULL; +} + +static int +octeontx_recheck_rx_offloads(struct octeontx_rxq *rxq) +{ + struct rte_eth_dev *eth_dev = rxq->eth_dev; + struct octeontx_nic *nic = octeontx_pmd_priv(eth_dev); + struct rte_eth_dev_data *data = eth_dev->data; + struct rte_pktmbuf_pool_private *mbp_priv; + struct evdev_priv_data *evdev_priv; + struct rte_eventdev *dev; + uint32_t buffsz; + + /* Get rx buffer size */ + mbp_priv = rte_mempool_get_priv(rxq->pool); + buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + + /* Setup scatter mode if needed by jumbo */ + if (data->dev_conf.rxmode.max_rx_pkt_len > buffsz) { + nic->rx_offloads |= DEV_RX_OFFLOAD_SCATTER; + nic->rx_offload_flags |= octeontx_rx_offload_flags(eth_dev); + nic->tx_offload_flags |= octeontx_tx_offload_flags(eth_dev); + } + + /* Sharing offload flags via eventdev priv region */ + dev = &rte_eventdevs[rxq->evdev]; + evdev_priv = dev->data->dev_private; + evdev_priv->rx_offload_flags = nic->rx_offload_flags; + evdev_priv->tx_offload_flags = nic->tx_offload_flags; + + return 0; } static int octeontx_dev_start(struct rte_eth_dev *dev) { struct octeontx_nic *nic = octeontx_pmd_priv(dev); - int ret; + struct octeontx_rxq *rxq; + int ret = 0, i; - ret = 0; + /* Rechecking if any new offload set to update + * rx/tx burst function pointer accordingly. + */ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxq = dev->data->rx_queues[i]; + octeontx_recheck_rx_offloads(rxq); + } PMD_INIT_FUNC_TRACE(); /* * Tx start */ - dev->tx_pkt_burst = octeontx_xmit_pkts; + octeontx_set_tx_function(dev); ret = octeontx_pko_channel_start(nic->base_ochan); if (ret < 0) { octeontx_log_err("fail to conf VF%d no. 
@@ -471,27 +536,24 @@ octeontx_dev_stop(struct rte_eth_dev *dev)
 				ret);
 		return;
 	}
-
-	dev->tx_pkt_burst = NULL;
-	dev->rx_pkt_burst = NULL;
 }
 
-static void
+static int
 octeontx_dev_promisc_enable(struct rte_eth_dev *dev)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	PMD_INIT_FUNC_TRACE();
-	octeontx_port_promisc_set(nic, 1);
+	return octeontx_port_promisc_set(nic, 1);
 }
 
-static void
+static int
 octeontx_dev_promisc_disable(struct rte_eth_dev *dev)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	PMD_INIT_FUNC_TRACE();
-	octeontx_port_promisc_set(nic, 0);
+	return octeontx_port_promisc_set(nic, 0);
 }
 
 static int
@@ -577,35 +639,73 @@ octeontx_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	return octeontx_port_stats(nic, stats);
 }
 
-static void
+static int
 octeontx_dev_stats_reset(struct rte_eth_dev *dev)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	PMD_INIT_FUNC_TRACE();
-	octeontx_port_stats_clr(nic);
+	return octeontx_port_stats_clr(nic);
+}
+
+static void
+octeontx_dev_mac_addr_del(struct rte_eth_dev *dev, uint32_t index)
+{
+	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+	int ret;
+
+	ret = octeontx_bgx_port_mac_del(nic->port_id, index);
+	if (ret != 0)
+		octeontx_log_err("failed to del MAC address filter on port %d",
+				 nic->port_id);
+}
+
+static int
+octeontx_dev_mac_addr_add(struct rte_eth_dev *dev,
+			  struct rte_ether_addr *mac_addr,
+			  uint32_t index,
+			  __rte_unused uint32_t vmdq)
+{
+	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
+	int ret;
+
+	ret = octeontx_bgx_port_mac_add(nic->port_id, mac_addr->addr_bytes,
+					index);
+	if (ret < 0) {
+		octeontx_log_err("failed to add MAC address filter on port %d",
+				 nic->port_id);
+		return ret;
+	}
+
+	return 0;
 }
 
 static int
 octeontx_dev_default_mac_addr_set(struct rte_eth_dev *dev,
-					struct ether_addr *addr)
+					struct rte_ether_addr *addr)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	int ret;
 
 	ret = octeontx_bgx_port_mac_set(nic->port_id, addr->addr_bytes);
-	if (ret != 0)
+	if (ret == 0) {
+		/* Update same mac address to BGX CAM table */
+		ret = octeontx_bgx_port_mac_add(nic->port_id, addr->addr_bytes,
+						0);
+	}
+	if (ret < 0) {
 		octeontx_log_err("failed to set MAC address on port %d",
-				nic->port_id);
+				 nic->port_id);
+	}
 
 	return ret;
 }
 
-static void
+static int
 octeontx_dev_info(struct rte_eth_dev *dev,
 		struct rte_eth_dev_info *dev_info)
 {
-	RTE_SET_USED(dev);
+	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 
 	/* Autonegotiation may be disabled */
 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
@@ -613,7 +713,8 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 			ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
 			ETH_LINK_SPEED_40G;
 
-	dev_info->max_mac_addrs = 1;
+	dev_info->max_mac_addrs =
+				octeontx_bgx_port_mac_entries_get(nic->port_id);
 	dev_info->max_rx_pktlen = PKI_MAX_PKTLEN;
 	dev_info->max_rx_queues = 1;
 	dev_info->max_tx_queues = PKO_MAX_NUM_DQ;
@@ -627,14 +728,15 @@ octeontx_dev_info(struct rte_eth_dev *dev,
 
 	dev_info->default_txconf = (struct rte_eth_txconf) {
 		.tx_free_thresh = 0,
-		.txq_flags =
-			ETH_TXQ_FLAGS_NOMULTSEGS |
-			ETH_TXQ_FLAGS_NOOFFLOADS |
-			ETH_TXQ_FLAGS_NOXSUMS,
+		.offloads = OCTEONTX_TX_OFFLOADS,
 	};
 
 	dev_info->rx_offload_capa = OCTEONTX_RX_OFFLOADS;
 	dev_info->tx_offload_capa = OCTEONTX_TX_OFFLOADS;
+	dev_info->rx_queue_offload_capa = OCTEONTX_RX_OFFLOADS;
+	dev_info->tx_queue_offload_capa = OCTEONTX_TX_OFFLOADS;
+
+	return 0;
 }
 
 static void
@@ -738,19 +840,17 @@ octeontx_dev_tx_queue_release(void *tx_queue)
 
 static int
 octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			    uint16_t nb_desc, unsigned int socket_id,
-			    const struct rte_eth_txconf *tx_conf)
+			    const struct rte_eth_txconf *tx_conf __rte_unused)
 {
 	struct octeontx_nic *nic = octeontx_pmd_priv(dev);
 	struct octeontx_txq *txq = NULL;
 	uint16_t dq_num;
 	int res = 0;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 	RTE_SET_USED(socket_id);
 
-	dq_num = (nic->port_id * PKO_VF_NUM_DQ) + qidx;
+	dq_num = (nic->pko_vfid * PKO_VF_NUM_DQ) + qidx;
 
 	/* Socket id check */
 	if (socket_id != (unsigned int)SOCKET_ID_ANY &&
@@ -766,22 +866,6 @@ octeontx_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		dev->data->tx_queues[qidx] = NULL;
 	}
 
-	configured_offloads = tx_conf->offloads;
-
-	if (!(configured_offloads & DEV_TX_OFFLOAD_MT_LOCKFREE)) {
-		PMD_INIT_LOG(NOTICE, "cant disable lockfree tx");
-		configured_offloads |= DEV_TX_OFFLOAD_MT_LOCKFREE;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_TX_OFFLOADS;
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
-			"Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
-			unsupported_offloads, configured_offloads,
-			(uint64_t)OCTEONTX_TX_OFFLOADS);
-		return -ENOTSUP;
-	}
-
 	/* Allocating tx queue data structure */
 	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct octeontx_txq),
 				 RTE_CACHE_LINE_SIZE, nic->node);
@@ -834,11 +918,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 	pki_qos_cfg_t pki_qos;
 	uintptr_t pool;
 	int ret, port;
-	uint8_t gaura;
+	uint16_t gaura;
 	unsigned int ev_queues = (nic->ev_queues * nic->port_id) + qidx;
 	unsigned int ev_ports = (nic->ev_ports * nic->port_id) + qidx;
-	uint64_t configured_offloads;
-	uint64_t unsupported_offloads;
 
 	RTE_SET_USED(nb_desc);
 
@@ -861,22 +943,6 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
 	port = nic->port_id;
 
-	configured_offloads = rx_conf->offloads;
-
-	if (!(configured_offloads & DEV_RX_OFFLOAD_CRC_STRIP)) {
-		PMD_INIT_LOG(NOTICE, "can't disable hw crc strip");
-		configured_offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
-	}
-
-	unsupported_offloads = configured_offloads & ~OCTEONTX_RX_OFFLOADS;
-
-	if (unsupported_offloads) {
-		PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
" - "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n", - unsupported_offloads, configured_offloads, - (uint64_t)OCTEONTX_RX_OFFLOADS); - return -ENOTSUP; - } /* Rx deferred start is not supported */ if (rx_conf->rx_deferred_start) { octeontx_log_err("rx deferred start not supported"); @@ -916,10 +982,11 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, pktbuf_conf.mmask.f_cache_mode = 1; pktbuf_conf.wqe_skip = OCTTX_PACKET_WQE_SKIP; - pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP; + pktbuf_conf.first_skip = OCTTX_PACKET_FIRST_SKIP(mb_pool); pktbuf_conf.later_skip = OCTTX_PACKET_LATER_SKIP; pktbuf_conf.mbuff_size = (mb_pool->elt_size - RTE_PKTMBUF_HEADROOM - + rte_pktmbuf_priv_size(mb_pool) - sizeof(struct rte_mbuf)); pktbuf_conf.cache_mode = PKI_OPC_MODE_STF2_STT; @@ -963,8 +1030,8 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, pool = (uintptr_t)mb_pool->pool_id; - /* Get the gpool Id */ - gaura = octeontx_fpa_bufpool_gpool(pool); + /* Get the gaura Id */ + gaura = octeontx_fpa_bufpool_gaura(pool); pki_qos.qpg_qos = PKI_QPG_QOS_NONE; pki_qos.num_entry = 1; @@ -993,7 +1060,9 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, rxq->evdev = nic->evdev; rxq->ev_queues = ev_queues; rxq->ev_ports = ev_ports; + rxq->pool = mb_pool; + octeontx_recheck_rx_offloads(rxq); dev->data->rx_queues[qidx] = rxq; dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; @@ -1048,6 +1117,8 @@ static const struct eth_dev_ops octeontx_dev_ops = { .link_update = octeontx_dev_link_update, .stats_get = octeontx_dev_stats_get, .stats_reset = octeontx_dev_stats_reset, + .mac_addr_remove = octeontx_dev_mac_addr_del, + .mac_addr_add = octeontx_dev_mac_addr_add, .mac_addr_set = octeontx_dev_default_mac_addr_set, .tx_queue_start = octeontx_dev_tx_queue_start, .tx_queue_stop = octeontx_dev_tx_queue_stop, @@ -1065,11 +1136,13 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, int socket_id) { int res; + size_t pko_vfid; char octtx_name[OCTEONTX_MAX_NAME_LEN]; struct octeontx_nic *nic = NULL; struct rte_eth_dev *eth_dev = NULL; struct rte_eth_dev_data *data; const char *name = rte_vdev_device_name(dev); + int max_entries; PMD_INIT_FUNC_TRACE(); @@ -1079,18 +1152,39 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, if (eth_dev == NULL) return -ENODEV; - eth_dev->tx_pkt_burst = octeontx_xmit_pkts; + eth_dev->dev_ops = &octeontx_dev_ops; + eth_dev->device = &dev->device; + octeontx_set_tx_function(eth_dev); eth_dev->rx_pkt_burst = octeontx_recv_pkts; + rte_eth_dev_probing_finish(eth_dev); return 0; } + /* Reserve an ethdev entry */ + eth_dev = rte_eth_dev_allocate(octtx_name); + if (eth_dev == NULL) { + octeontx_log_err("failed to allocate rte_eth_dev"); + res = -ENOMEM; + goto err; + } + data = eth_dev->data; + nic = rte_zmalloc_socket(octtx_name, sizeof(*nic), 0, socket_id); if (nic == NULL) { octeontx_log_err("failed to allocate nic structure"); res = -ENOMEM; goto err; } + data->dev_private = nic; + pko_vfid = octeontx_pko_get_vfid(); + if (pko_vfid == SIZE_MAX) { + octeontx_log_err("failed to get pko vfid"); + res = -ENODEV; + goto err; + } + + nic->pko_vfid = pko_vfid; nic->port_id = port; nic->evdev = evdev; @@ -1106,21 +1200,11 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev, goto err; } - /* Reserve an ethdev entry */ - eth_dev = rte_eth_dev_allocate(octtx_name); - if (eth_dev == NULL) { - octeontx_log_err("failed to allocate rte_eth_dev"); - res = -ENOMEM; - goto 
-	}
-
 	eth_dev->device = &dev->device;
 	eth_dev->intr_handle = NULL;
 	eth_dev->data->kdrv = RTE_KDRV_NONE;
 	eth_dev->data->numa_node = dev->device.numa_node;
-	data = eth_dev->data;
-	data->dev_private = nic;
 
 	data->port_id = eth_dev->data->port_id;
 
 	nic->ev_queues = 1;
@@ -1132,7 +1216,16 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	data->all_multicast = 0;
 	data->scattered_rx = 0;
 
-	data->mac_addrs = rte_zmalloc_socket(octtx_name, ETHER_ADDR_LEN, 0,
+	/* Get maximum number of supported MAC entries */
+	max_entries = octeontx_bgx_port_mac_entries_get(nic->port_id);
+	if (max_entries < 0) {
+		octeontx_log_err("Failed to get max entries for mac addr");
+		res = -ENOTSUP;
+		goto err;
+	}
+
+	data->mac_addrs = rte_zmalloc_socket(octtx_name, max_entries *
+							RTE_ETHER_ADDR_LEN, 0,
 							socket_id);
 	if (data->mac_addrs == NULL) {
 		octeontx_log_err("failed to allocate memory for mac_addrs");
@@ -1149,11 +1242,14 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 		octeontx_log_err("eth_dev->port_id (%d) is diff to orig (%d)",
 				data->port_id, nic->port_id);
 		res = -EINVAL;
-		goto err;
+		goto free_mac_addrs;
 	}
 
 	/* Update port_id mac to eth_dev */
-	memcpy(data->mac_addrs, nic->mac_addr, ETHER_ADDR_LEN);
+	memcpy(data->mac_addrs, nic->mac_addr, RTE_ETHER_ADDR_LEN);
+
+	/* Update same mac address to BGX CAM table at index 0 */
+	octeontx_bgx_port_mac_add(nic->port_id, nic->mac_addr, 0);
 
 	PMD_INIT_LOG(DEBUG, "ethdev info: ");
 	PMD_INIT_LOG(DEBUG, "port %d, port_ena %d ochan %d num_ochan %d tx_q %d",
@@ -1165,18 +1261,16 @@ octeontx_create(struct rte_vdev_device *dev, int port, uint8_t evdev,
 	rte_octeontx_pchan_map[(nic->base_ochan >> 8) & 0x7]
 		[(nic->base_ochan >> 4) & 0xF] = data->port_id;
 
+	rte_eth_dev_probing_finish(eth_dev);
 	return data->port_id;
 
+free_mac_addrs:
+	rte_free(data->mac_addrs);
 err:
 	if (nic)
 		octeontx_port_close(nic);
 
-	if (eth_dev != NULL) {
-		rte_free(eth_dev->data->mac_addrs);
-		rte_free(data);
-		rte_free(nic);
-		rte_eth_dev_release_port(eth_dev);
-	}
+	rte_eth_dev_release_port(eth_dev);
 
 	return res;
 }
@@ -1201,16 +1295,22 @@ octeontx_remove(struct rte_vdev_device *dev)
 		if (eth_dev == NULL)
 			return -ENODEV;
 
+		if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
+			rte_eth_dev_release_port(eth_dev);
+			continue;
+		}
+
 		nic = octeontx_pmd_priv(eth_dev);
 		rte_event_dev_stop(nic->evdev);
 		PMD_INIT_LOG(INFO, "Closing octeontx device %s", octtx_name);
 
-		rte_free(eth_dev->data->mac_addrs);
-		rte_free(eth_dev->data->dev_private);
 		rte_eth_dev_release_port(eth_dev);
 		rte_event_dev_close(nic->evdev);
 	}
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	/* Free FC resource */
 	octeontx_pko_fc_free();
 
@@ -1228,12 +1328,28 @@ octeontx_probe(struct rte_vdev_device *dev)
 	struct rte_event_dev_config dev_conf;
 	const char *eventdev_name = "event_octeontx";
 	struct rte_event_dev_info info;
+	struct rte_eth_dev *eth_dev;
 	struct octeontx_vdev_init_params init_params = {
 		OCTEONTX_VDEV_DEFAULT_MAX_NR_PORT
 	};
 
 	dev_name = rte_vdev_device_name(dev);
+
+	if (rte_eal_process_type() == RTE_PROC_SECONDARY &&
+	    strlen(rte_vdev_device_args(dev)) == 0) {
+		eth_dev = rte_eth_dev_attach_secondary(dev_name);
+		if (!eth_dev) {
+			PMD_INIT_LOG(ERR, "Failed to probe %s", dev_name);
+			return -1;
+		}
+		/* TODO: request info from primary to set up Rx and Tx */
+		eth_dev->dev_ops = &octeontx_dev_ops;
+		eth_dev->device = &dev->device;
+		rte_eth_dev_probing_finish(eth_dev);
+		return 0;
+	}
+
 	res = octeontx_parse_vdev_init_params(&init_params, dev);
 	if (res < 0)
 		return -EINVAL;
@@ -1288,15 +1404,8 @@ octeontx_probe(struct rte_vdev_device *dev)
 		res = -EINVAL;
 		goto parse_error;
 	}
-	if (pnum > qnum) {
-		/*
-		 * We don't poll on event ports
-		 * that do not have any queues assigned.
-		 */
-		pnum = qnum;
-		PMD_INIT_LOG(INFO,
-			"reducing number of active event ports to %d", pnum);
-	}
+
+	/* Enable all queues available */
 	for (i = 0; i < qnum; i++) {
 		res = rte_event_queue_setup(evdev, i, NULL);
 		if (res < 0) {
@@ -1306,6 +1415,7 @@ octeontx_probe(struct rte_vdev_device *dev)
 		}
 	}
 
+	/* Enable all ports available */
 	for (i = 0; i < pnum; i++) {
 		res = rte_event_port_setup(evdev, i, NULL);
 		if (res < 0) {
@@ -1314,6 +1424,14 @@ octeontx_probe(struct rte_vdev_device *dev)
 					i, res);
 			goto parse_error;
 		}
+	}
+
+	/*
+	 * Do 1:1 links for ports & queues. All queues would be mapped to
+	 * one port. If there are more ports than queues, then some ports
+	 * won't be linked to any queue.
+	 */
+	for (i = 0; i < qnum; i++) {
 		/* Link one queue to one event port */
 		qlist = i;
 		res = rte_event_port_link(evdev, i, &qlist, NULL, 1);