X-Git-Url: http://git.droids-corp.org/?a=blobdiff_plain;f=lib%2Flibrte_ether%2Frte_ethdev.c;h=798af41e6cb9292f01a484a180628a9755482ad6;hb=cbb4c648c5dfff6fced96c5c76166a3e0e048fa4;hp=fe56f65821577e626616757e369f57dc97f41197;hpb=ea85e7d711b664558a53a8131e22fdff952e5241;p=dpdk.git diff --git a/lib/librte_ether/rte_ethdev.c b/lib/librte_ether/rte_ethdev.c index fe56f65821..798af41e6c 100644 --- a/lib/librte_ether/rte_ethdev.c +++ b/lib/librte_ether/rte_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2017 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -129,6 +129,7 @@ struct rte_eth_dev_callback { TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */ rte_eth_dev_cb_fn cb_fn; /**< Callback address */ void *cb_arg; /**< Parameter for callback */ + void *ret_param; /**< Return parameter */ enum rte_eth_event_type event; /**< Interrupt event type */ uint32_t active; /**< Callback is executing */ }; @@ -339,7 +340,6 @@ rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id) if (!nb_ports) return -ENODEV; - *port_id = RTE_MAX_ETHPORTS; RTE_ETH_FOREACH_DEV(i) { if (!strncmp(name, rte_eth_dev_data[i].name, strlen(name))) { @@ -440,7 +440,8 @@ rte_eth_dev_detach(uint8_t port_id, char *name) snprintf(name, sizeof(rte_eth_devices[port_id].data->name), "%s", rte_eth_devices[port_id].data->name); - ret = rte_eal_dev_detach(name); + + ret = rte_eal_dev_detach(rte_eth_devices[port_id].device); if (ret < 0) goto err; @@ -749,16 +750,19 @@ rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q, return -EINVAL; } - /* - * If link state interrupt is enabled, check that the - * device supports it. 
- */ + /* Check that the device supports requested interrupts */ if ((dev_conf->intr_conf.lsc == 1) && (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) { RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n", - dev->data->drv_name); + dev->device->driver->name); return -EINVAL; } + if ((dev_conf->intr_conf.rmv == 1) && + (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) { + RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n", + dev->device->driver->name); + return -EINVAL; + } /* * If jumbo frames are enabled, check that the maximum RX packet @@ -983,8 +987,10 @@ rte_eth_dev_close(uint8_t port_id) dev->data->dev_started = 0; (*dev->dev_ops->dev_close)(dev); + dev->data->nb_rx_queues = 0; rte_free(dev->data->rx_queues); dev->data->rx_queues = NULL; + dev->data->nb_tx_queues = 0; rte_free(dev->data->tx_queues); dev->data->tx_queues = NULL; } @@ -1357,8 +1363,8 @@ get_xstats_count(uint8_t port_id) RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); dev = &rte_eth_devices[port_id]; - if (dev->dev_ops->xstats_get_names_by_ids != NULL) { - count = (*dev->dev_ops->xstats_get_names_by_ids)(dev, NULL, + if (dev->dev_ops->xstats_get_names_by_id != NULL) { + count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL, NULL, 0); if (count < 0) return count; @@ -1379,16 +1385,51 @@ get_xstats_count(uint8_t port_id) } int -rte_eth_xstats_get_names_v1607(uint8_t port_id, - struct rte_eth_xstat_name *xstats_names, - unsigned int size) +rte_eth_xstats_get_id_by_name(uint8_t port_id, const char *xstat_name, + uint64_t *id) { - return rte_eth_xstats_get_names(port_id, xstats_names, size, NULL); + int cnt_xstats, idx_xstat; + + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); + + if (!id) { + RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n"); + return -ENOMEM; + } + + if (!xstat_name) { + RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n"); + return -ENOMEM; + } + + /* Get count */ + cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); + if (cnt_xstats < 0) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n"); + return -ENODEV; + } + + /* Get id-name lookup table */ + struct rte_eth_xstat_name xstats_names[cnt_xstats]; + + if (cnt_xstats != rte_eth_xstats_get_names_by_id( + port_id, xstats_names, cnt_xstats, NULL)) { + RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n"); + return -1; + } + + for (idx_xstat = 0; idx_xstat < cnt_xstats; idx_xstat++) { + if (!strcmp(xstats_names[idx_xstat].name, xstat_name)) { + *id = idx_xstat; + return 0; + }; + } + + return -EINVAL; } -VERSION_SYMBOL(rte_eth_xstats_get_names, _v1607, 16.07); int -rte_eth_xstats_get_names_v1705(uint8_t port_id, +rte_eth_xstats_get_names_by_id(uint8_t port_id, struct rte_eth_xstat_name *xstats_names, unsigned int size, uint64_t *ids) { @@ -1442,12 +1483,12 @@ rte_eth_xstats_get_names_v1705(uint8_t port_id, } } - if (dev->dev_ops->xstats_get_names_by_ids != NULL) { + if (dev->dev_ops->xstats_get_names_by_id != NULL) { /* If there are any driver-specific xstats, append them * to end of list. 
*/ cnt_driver_entries = - (*dev->dev_ops->xstats_get_names_by_ids)( + (*dev->dev_ops->xstats_get_names_by_id)( dev, xstats_names + cnt_used_entries, NULL, @@ -1476,7 +1517,7 @@ rte_eth_xstats_get_names_v1705(uint8_t port_id, uint16_t len, i; struct rte_eth_xstat_name *xstats_names_copy; - len = rte_eth_xstats_get_names_v1705(port_id, NULL, 0, NULL); + len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL); xstats_names_copy = malloc(sizeof(struct rte_eth_xstat_name) * len); @@ -1487,7 +1528,7 @@ rte_eth_xstats_get_names_v1705(uint8_t port_id, return -1; } - rte_eth_xstats_get_names_v1705(port_id, xstats_names_copy, + rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy, len, NULL); for (i = 0; i < size; i++) { @@ -1503,42 +1544,75 @@ rte_eth_xstats_get_names_v1705(uint8_t port_id, return size; } } -BIND_DEFAULT_SYMBOL(rte_eth_xstats_get_names, _v1705, 17.05); - -MAP_STATIC_SYMBOL(int - rte_eth_xstats_get_names(uint8_t port_id, - struct rte_eth_xstat_name *xstats_names, - unsigned int size, - uint64_t *ids), rte_eth_xstats_get_names_v1705); -/* retrieve ethdev extended statistics */ int -rte_eth_xstats_get_v22(uint8_t port_id, struct rte_eth_xstat *xstats, - unsigned int n) +rte_eth_xstats_get_names(uint8_t port_id, + struct rte_eth_xstat_name *xstats_names, + unsigned int size) { - uint64_t *values_copy; - uint16_t size, i; + struct rte_eth_dev *dev; + int cnt_used_entries; + int cnt_expected_entries; + int cnt_driver_entries; + uint32_t idx, id_queue; + uint16_t num_q; - values_copy = malloc(sizeof(values_copy) * n); - if (!values_copy) { - RTE_PMD_DEBUG_TRACE( - "ERROR: Cannot allocate memory for xstats\n"); - return -1; + cnt_expected_entries = get_xstats_count(port_id); + if (xstats_names == NULL || cnt_expected_entries < 0 || + (int)size < cnt_expected_entries) + return cnt_expected_entries; + + /* port_id checked in get_xstats_count() */ + dev = &rte_eth_devices[port_id]; + cnt_used_entries = 0; + + for (idx = 0; idx < RTE_NB_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "%s", rte_stats_strings[idx].name); + cnt_used_entries++; } - size = rte_eth_xstats_get(port_id, 0, values_copy, n); + num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "rx_q%u%s", + id_queue, rte_rxq_stats_strings[idx].name); + cnt_used_entries++; + } - for (i = 0; i < n; i++) { - xstats[i].id = i; - xstats[i].value = values_copy[i]; } - free(values_copy); - return size; + num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (id_queue = 0; id_queue < num_q; id_queue++) { + for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) { + snprintf(xstats_names[cnt_used_entries].name, + sizeof(xstats_names[0].name), + "tx_q%u%s", + id_queue, rte_txq_stats_strings[idx].name); + cnt_used_entries++; + } + } + + if (dev->dev_ops->xstats_get_names != NULL) { + /* If there are any driver-specific xstats, append them + * to end of list. 
+ */ + cnt_driver_entries = (*dev->dev_ops->xstats_get_names)( + dev, + xstats_names + cnt_used_entries, + size - cnt_used_entries); + if (cnt_driver_entries < 0) + return cnt_driver_entries; + cnt_used_entries += cnt_driver_entries; + } + + return cnt_used_entries; } -VERSION_SYMBOL(rte_eth_xstats_get, _v22, 2.2); /* retrieve ethdev extended statistics */ int -rte_eth_xstats_get_v1705(uint8_t port_id, uint64_t *ids, uint64_t *values, +rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values, unsigned int n) { /* If need all xstats */ @@ -1564,11 +1638,11 @@ rte_eth_xstats_get_v1705(uint8_t port_id, uint64_t *ids, uint64_t *values, /* implemented by the driver */ - if (dev->dev_ops->xstats_get_by_ids != NULL) { + if (dev->dev_ops->xstats_get_by_id != NULL) { /* Retrieve the xstats from the driver at the end of the * xstats struct. Retrieve all xstats. */ - xcount = (*dev->dev_ops->xstats_get_by_ids)(dev, + xcount = (*dev->dev_ops->xstats_get_by_id)(dev, NULL, values ? values + count : NULL, (n > count) ? n - count : 0); @@ -1639,16 +1713,16 @@ rte_eth_xstats_get_v1705(uint8_t port_id, uint64_t *ids, uint64_t *values, uint16_t i, size; uint64_t *values_copy; - size = rte_eth_xstats_get_v1705(port_id, NULL, NULL, 0); + size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0); - values_copy = malloc(sizeof(values_copy) * size); + values_copy = malloc(sizeof(*values_copy) * size); if (!values_copy) { RTE_PMD_DEBUG_TRACE( "ERROR: can't allocate memory for values_copy\n"); return -1; } - rte_eth_xstats_get_v1705(port_id, NULL, values_copy, size); + rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size); for (i = 0; i < n; i++) { if (ids[i] >= size) { @@ -1662,40 +1736,86 @@ rte_eth_xstats_get_v1705(uint8_t port_id, uint64_t *ids, uint64_t *values, return n; } } -BIND_DEFAULT_SYMBOL(rte_eth_xstats_get, _v1705, 17.05); - -MAP_STATIC_SYMBOL(int - rte_eth_xstats_get(uint8_t port_id, uint64_t *ids, - uint64_t *values, unsigned int n), rte_eth_xstats_get_v1705); int -rte_eth_xstats_get_all(uint8_t port_id, struct rte_eth_xstat *xstats, +rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats, unsigned int n) { - uint64_t *values_copy; - uint16_t size, i; + struct rte_eth_stats eth_stats; + struct rte_eth_dev *dev; + unsigned int count = 0, i, q; + signed int xcount = 0; + uint64_t val, *stats_ptr; + uint16_t nb_rxqs, nb_txqs; - values_copy = malloc(sizeof(values_copy) * n); - if (!values_copy) { - RTE_PMD_DEBUG_TRACE( - "ERROR: Cannot allocate memory for xstats\n"); - return -1; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL); + + dev = &rte_eth_devices[port_id]; + + nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS); + + /* Return generic statistics */ + count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) + + (nb_txqs * RTE_NB_TXQ_STATS); + + /* implemented by the driver */ + if (dev->dev_ops->xstats_get != NULL) { + /* Retrieve the xstats from the driver at the end of the + * xstats struct. + */ + xcount = (*dev->dev_ops->xstats_get)(dev, + xstats ? xstats + count : NULL, + (n > count) ? 
n - count : 0); + + if (xcount < 0) + return xcount; } - size = rte_eth_xstats_get(port_id, 0, values_copy, n); - for (i = 0; i < n; i++) { - xstats[i].id = i; - xstats[i].value = values_copy[i]; + if (n < count + xcount || xstats == NULL) + return count + xcount; + + /* now fill the xstats structure */ + count = 0; + rte_eth_stats_get(port_id, ð_stats); + + /* global stats */ + for (i = 0; i < RTE_NB_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_stats_strings[i].offset); + val = *stats_ptr; + xstats[count++].value = val; } - free(values_copy); - return size; -} -int -rte_eth_xstats_get_names_all(uint8_t port_id, - struct rte_eth_xstat_name *xstats_names, unsigned int n) -{ - return rte_eth_xstats_get_names(port_id, xstats_names, n, NULL); + /* per-rxq stats */ + for (q = 0; q < nb_rxqs; q++) { + for (i = 0; i < RTE_NB_RXQ_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_rxq_stats_strings[i].offset + + q * sizeof(uint64_t)); + val = *stats_ptr; + xstats[count++].value = val; + } + } + + /* per-txq stats */ + for (q = 0; q < nb_txqs; q++) { + for (i = 0; i < RTE_NB_TXQ_STATS; i++) { + stats_ptr = RTE_PTR_ADD(ð_stats, + rte_txq_stats_strings[i].offset + + q * sizeof(uint64_t)); + val = *stats_ptr; + xstats[count++].value = val; + } + } + + for (i = 0; i < count; i++) + xstats[i].id = i; + /* add an offset to driver-specific stats */ + for ( ; i < count + xcount; i++) + xstats[i].id += count; + + return count + xcount; } /* reset ethdev extended statistics */ @@ -1781,7 +1901,7 @@ rte_eth_dev_info_get(uint8_t port_id, struct rte_eth_dev_info *dev_info) RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get); (*dev->dev_ops->dev_infos_get)(dev, dev_info); - dev_info->driver_name = dev->data->drv_name; + dev_info->driver_name = dev->device->driver->name; dev_info->nb_rx_queues = dev->data->nb_rx_queues; dev_info->nb_tx_queues = dev->data->nb_tx_queues; } @@ -2250,6 +2370,7 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, struct rte_eth_dev *dev; int index; uint64_t pool_mask; + int ret; RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); dev = &rte_eth_devices[port_id]; @@ -2282,15 +2403,17 @@ rte_eth_dev_mac_addr_add(uint8_t port_id, struct ether_addr *addr, } /* Update NIC */ - (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); + ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool); - /* Update address in NIC data structure */ - ether_addr_copy(addr, &dev->data->mac_addrs[index]); + if (ret == 0) { + /* Update address in NIC data structure */ + ether_addr_copy(addr, &dev->data->mac_addrs[index]); - /* Update pool bitmap in NIC data structure */ - dev->data->mac_pool_sel[index] |= (1ULL << pool); + /* Update pool bitmap in NIC data structure */ + dev->data->mac_pool_sel[index] |= (1ULL << pool); + } - return 0; + return ret; } int @@ -2596,12 +2719,13 @@ rte_eth_dev_callback_unregister(uint8_t port_id, return ret; } -void +int _rte_eth_dev_callback_process(struct rte_eth_dev *dev, - enum rte_eth_event_type event, void *cb_arg) + enum rte_eth_event_type event, void *cb_arg, void *ret_param) { struct rte_eth_dev_callback *cb_lst; struct rte_eth_dev_callback dev_cb; + int rc = 0; rte_spinlock_lock(&rte_eth_dev_cb_lock); TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) { @@ -2611,14 +2735,17 @@ _rte_eth_dev_callback_process(struct rte_eth_dev *dev, cb_lst->active = 1; if (cb_arg != NULL) dev_cb.cb_arg = cb_arg; + if (ret_param != NULL) + dev_cb.ret_param = ret_param; rte_spinlock_unlock(&rte_eth_dev_cb_lock); - dev_cb.cb_fn(dev->data->port_id, 
dev_cb.event, - dev_cb.cb_arg); + rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event, + dev_cb.cb_arg, dev_cb.ret_param); rte_spinlock_lock(&rte_eth_dev_cb_lock); cb_lst->active = 0; } rte_spinlock_unlock(&rte_eth_dev_cb_lock); + return rc; } int @@ -2667,7 +2794,7 @@ rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name, const struct rte_memzone *mz; snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d", - dev->data->drv_name, ring_name, + dev->device->driver->name, ring_name, dev->data->port_id, queue_id); mz = rte_memzone_lookup(z_name); @@ -2750,128 +2877,6 @@ rte_eth_dev_rx_intr_disable(uint8_t port_id, return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id); } -#ifdef RTE_NIC_BYPASS -int rte_eth_dev_bypass_init(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP); - (*dev->dev_ops->bypass_init)(dev); - return 0; -} - -int -rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_state_show)(dev, state); - return 0; -} - -int -rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP); - (*dev->dev_ops->bypass_state_set)(dev, new_state); - return 0; -} - -int -rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP); - (*dev->dev_ops->bypass_event_show)(dev, event, state); - return 0; -} - -int -rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP); - (*dev->dev_ops->bypass_event_set)(dev, event, state); - return 0; -} - -int -rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout); - return 0; -} - -int -rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP); - (*dev->dev_ops->bypass_ver_show)(dev, ver); - return 0; -} - -int -rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = &rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP); - (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout); - return 0; -} - -int -rte_eth_dev_bypass_wd_reset(uint8_t port_id) -{ - struct rte_eth_dev *dev; - - RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV); - - dev = 
&rte_eth_devices[port_id]; - - RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP); - (*dev->dev_ops->bypass_wd_reset)(dev); - return 0; -} -#endif int rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
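
Usage sketch (not part of the patch above): the hunks rename the driver callbacks to xstats_get_by_id/xstats_get_names_by_id and add rte_eth_xstats_get_id_by_name(), so an application can resolve an xstat name to an id once and then poll only that counter. The snippet below is a minimal illustration under those assumptions; it presumes a configured and started port, and "rx_good_packets" is only an example counter name that may not be exposed by every PMD.

/*
 * Hedged example: resolve an xstat name to its id once, then read the
 * single counter by id on the fast path instead of fetching the whole
 * xstats table.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdint.h>

#include <rte_ethdev.h>

static int
print_one_xstat(uint8_t port_id, const char *name)
{
	uint64_t id;
	uint64_t value;
	int ret;

	/* Map the human-readable name to its index in the xstats table. */
	ret = rte_eth_xstats_get_id_by_name(port_id, name, &id);
	if (ret != 0)
		return ret;

	/* Fetch just this one counter by id. */
	ret = rte_eth_xstats_get_by_id(port_id, &id, &value, 1);
	if (ret < 0)
		return ret;

	printf("port %u: %s = %" PRIu64 "\n", (unsigned)port_id, name, value);
	return 0;
}

For example, print_one_xstat(0, "rx_good_packets") would print that counter for port 0 if the PMD exposes it. The same patch also makes _rte_eth_dev_callback_process() return int and forward a ret_param to user callbacks, so event callbacks registered with rte_eth_dev_callback_register() now use the four-argument, int-returning rte_eth_dev_cb_fn signature shown in the hunk.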