/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "rte_ether.h"
#include "rte_ethdev.h"
+#include "ethdev_profile.h"
static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
static struct rte_eth_dev_data *rte_eth_dev_data;
static uint8_t eth_dev_last_created_port;
-static uint8_t nb_ports;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
rte_eth_dev_cb_fn cb_fn; /**< Callback address */
void *cb_arg; /**< Parameter for callback */
+ void *ret_param; /**< Return parameter */
enum rte_eth_event_type event; /**< Interrupt event type */
uint32_t active; /**< Callback is executing */
};
unsigned i;
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if ((rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED) &&
- strcmp(rte_eth_devices[i].data->name, name) == 0)
- return &rte_eth_devices[i];
+ if (rte_eth_devices[i].state == RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[i].device) {
+ if (!strcmp(rte_eth_devices[i].device->name, name))
+ return &rte_eth_devices[i];
+ }
}
return NULL;
}
TAILQ_INIT(&(eth_dev->link_intr_cbs));
eth_dev_last_created_port = port_id;
- nb_ports++;
return eth_dev;
}
return -EINVAL;
eth_dev->state = RTE_ETH_DEV_UNUSED;
- nb_ports--;
return 0;
}
rte_eth_dev_is_valid_port(uint8_t port_id)
{
if (port_id >= RTE_MAX_ETHPORTS ||
- rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED)
+ (rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
+ rte_eth_devices[port_id].state != RTE_ETH_DEV_DEFERRED))
return 0;
else
return 1;
uint8_t
rte_eth_dev_count(void)
{
- return nb_ports;
+ uint8_t p;
+ uint8_t count;
+
+ count = 0;
+
+ RTE_ETH_FOREACH_DEV(p)
+ count++;
+
+ return count;
}
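/*
 * Illustrative usage sketch, not part of this patch: attached port ids are
 * not guaranteed to be contiguous, so applications should walk the ports
 * with RTE_ETH_FOREACH_DEV() instead of assuming ids 0..rte_eth_dev_count()-1.
 * Assumes <stdio.h> is available for printf().
 */
static void
example_list_ports(void)
{
	uint8_t p;

	RTE_ETH_FOREACH_DEV(p)
		printf("port %u is attached\n", (unsigned int)p);
}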
int
rte_eth_dev_get_name_by_port(uint8_t port_id, char *name)
{
- char *tmp;
+ const char *tmp;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
/* shouldn't check 'rte_eth_devices[port_id].data' here,
* because it might be overwritten by a VDEV PMD */
- tmp = rte_eth_dev_data[port_id].name;
+ tmp = rte_eth_devices[port_id].device->name;
strcpy(name, tmp);
return 0;
}
int
rte_eth_dev_get_port_by_name(const char *name, uint8_t *port_id)
{
+ int ret;
int i;
if (name == NULL) {
return -EINVAL;
}
- if (!nb_ports)
- return -ENODEV;
-
- *port_id = RTE_MAX_ETHPORTS;
RTE_ETH_FOREACH_DEV(i) {
- if (!strncmp(name,
- rte_eth_dev_data[i].name, strlen(name))) {
+ if (!rte_eth_devices[i].device)
+ continue;
+ ret = strncmp(name, rte_eth_devices[i].device->name,
+ strlen(name));
+ if (ret == 0) {
*port_id = i;
-
return 0;
}
}
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
- switch (rte_eth_devices[port_id].data->kdrv) {
- case RTE_KDRV_IGB_UIO:
- case RTE_KDRV_UIO_GENERIC:
- case RTE_KDRV_NIC_UIO:
- case RTE_KDRV_NONE:
- case RTE_KDRV_VFIO:
- break;
- default:
- return -ENOTSUP;
- }
dev_flags = rte_eth_devices[port_id].data->dev_flags;
if ((dev_flags & RTE_ETH_DEV_DETACHABLE) &&
(!(dev_flags & RTE_ETH_DEV_BONDED_SLAVE)))
if (rte_eth_dev_is_detachable(port_id))
goto err;
- snprintf(name, sizeof(rte_eth_devices[port_id].data->name),
- "%s", rte_eth_devices[port_id].data->name);
- ret = rte_eal_dev_detach(name);
+ snprintf(name, RTE_DEV_NAME_MAX_LEN, "%s",
+ rte_eth_devices[port_id].device->name);
+
+ ret = rte_eal_dev_detach(rte_eth_devices[port_id].device);
if (ret < 0)
goto err;
+ rte_eth_devices[port_id].state = RTE_ETH_DEV_UNUSED;
return 0;
err:
}
}
+/**
+ * Convert the legacy rxmode bitfield configuration into the Rx offloads
+ * bitmap.
+ */
+static void
+rte_eth_convert_rx_offload_bitfield(const struct rte_eth_rxmode *rxmode,
+ uint64_t *rx_offloads)
+{
+ uint64_t offloads = 0;
+
+ if (rxmode->header_split == 1)
+ offloads |= DEV_RX_OFFLOAD_HEADER_SPLIT;
+ if (rxmode->hw_ip_checksum == 1)
+ offloads |= DEV_RX_OFFLOAD_CHECKSUM;
+ if (rxmode->hw_vlan_filter == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ if (rxmode->hw_vlan_strip == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ if (rxmode->hw_vlan_extend == 1)
+ offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ if (rxmode->jumbo_frame == 1)
+ offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ if (rxmode->hw_strip_crc == 1)
+ offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
+ if (rxmode->enable_scatter == 1)
+ offloads |= DEV_RX_OFFLOAD_SCATTER;
+ if (rxmode->enable_lro == 1)
+ offloads |= DEV_RX_OFFLOAD_TCP_LRO;
+
+ *rx_offloads = offloads;
+}
+
+/**
+ * Convert the Rx offloads bitmap back into the legacy rxmode bitfield
+ * configuration.
+ */
+static void
+rte_eth_convert_rx_offloads(const uint64_t rx_offloads,
+ struct rte_eth_rxmode *rxmode)
+{
+
+ if (rx_offloads & DEV_RX_OFFLOAD_HEADER_SPLIT)
+ rxmode->header_split = 1;
+ else
+ rxmode->header_split = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
+ rxmode->hw_ip_checksum = 1;
+ else
+ rxmode->hw_ip_checksum = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
+ rxmode->hw_vlan_filter = 1;
+ else
+ rxmode->hw_vlan_filter = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ rxmode->hw_vlan_strip = 1;
+ else
+ rxmode->hw_vlan_strip = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
+ rxmode->hw_vlan_extend = 1;
+ else
+ rxmode->hw_vlan_extend = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
+ rxmode->jumbo_frame = 1;
+ else
+ rxmode->jumbo_frame = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP)
+ rxmode->hw_strip_crc = 1;
+ else
+ rxmode->hw_strip_crc = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_SCATTER)
+ rxmode->enable_scatter = 1;
+ else
+ rxmode->enable_scatter = 0;
+ if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO)
+ rxmode->enable_lro = 1;
+ else
+ rxmode->enable_lro = 0;
+}
+
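/*
 * Illustrative usage sketch, not part of this patch: an application opting in
 * to the new offloads API sets ignore_offload_bitfield and fills
 * rxmode.offloads; legacy applications keep the bitfield flags and
 * rte_eth_dev_configure() converts either representation with the helpers
 * above. The queue counts are placeholders.
 */
static int
example_configure_with_offloads(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	conf.rxmode.ignore_offload_bitfield = 1;
	conf.rxmode.offloads = DEV_RX_OFFLOAD_CHECKSUM |
			       DEV_RX_OFFLOAD_VLAN_STRIP;

	return rte_eth_dev_configure(port_id, 1, 1, &conf);
}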
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ struct rte_eth_conf local_conf = *dev_conf;
int diag;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
return -EBUSY;
}
+ /*
+ * Convert between the two Rx offload APIs so that a PMD only needs to
+ * support one of them.
+ */
+ if ((dev_conf->rxmode.ignore_offload_bitfield == 0)) {
+ rte_eth_convert_rx_offload_bitfield(
+ &dev_conf->rxmode, &local_conf.rxmode.offloads);
+ } else {
+ rte_eth_convert_rx_offloads(dev_conf->rxmode.offloads,
+ &local_conf.rxmode);
+ }
+
/* Copy the dev_conf parameter into the dev structure */
- memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+ memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
/*
* Check that the numbers of RX and TX queues are not greater
return -EINVAL;
}
- /*
- * If link state interrupt is enabled, check that the
- * device supports it.
- */
+ /* Check that the device supports requested interrupts */
if ((dev_conf->intr_conf.lsc == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_PMD_DEBUG_TRACE("driver %s does not support lsc\n",
- dev->data->drv_name);
+ dev->device->driver->name);
return -EINVAL;
}
+ if ((dev_conf->intr_conf.rmv == 1) &&
+ (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
+ RTE_PMD_DEBUG_TRACE("driver %s does not support rmv\n",
+ dev->device->driver->name);
+ return -EINVAL;
+ }
/*
* If jumbo frames are enabled, check that the maximum RX packet
* length is supported by the configured device.
*/
- if (dev_conf->rxmode.jumbo_frame == 1) {
+ if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_conf->rxmode.max_rx_pkt_len >
dev_info.max_rx_pktlen) {
RTE_PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
return diag;
}
+ /* Initialize Rx profiling if enabled at compilation time. */
+ diag = __rte_eth_profile_rx_init(port_id, dev);
+ if (diag != 0) {
+ RTE_PMD_DEBUG_TRACE("port%d __rte_eth_profile_rx_init = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
return 0;
}
dev->data->dev_started = 0;
(*dev->dev_ops->dev_close)(dev);
+ dev->data->nb_rx_queues = 0;
rte_free(dev->data->rx_queues);
dev->data->rx_queues = NULL;
+ dev->data->nb_tx_queues = 0;
rte_free(dev->data->tx_queues);
dev->data->tx_queues = NULL;
}
+int
+rte_eth_dev_reset(uint8_t port_id)
+{
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_reset, -ENOTSUP);
+
+ rte_eth_dev_stop(port_id);
+ ret = dev->dev_ops->dev_reset(dev);
+
+ return ret;
+}
+
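/*
 * Illustrative recovery sketch, not part of this patch: rte_eth_dev_reset()
 * stops the port and asks the PMD to reset it; the application is then
 * responsible for reconfiguring and restarting the port. The single queue,
 * ring size and mempool below are placeholders.
 */
static int
example_recover_port(uint8_t port_id, const struct rte_eth_conf *conf,
		     struct rte_mempool *mb_pool)
{
	int ret;

	ret = rte_eth_dev_reset(port_id);
	if (ret != 0)
		return ret;

	ret = rte_eth_dev_configure(port_id, 1, 1, conf);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	ret = rte_eth_tx_queue_setup(port_id, 0, 512,
				     rte_eth_dev_socket_id(port_id), NULL);
	if (ret != 0)
		return ret;

	return rte_eth_dev_start(port_id);
}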
int
rte_eth_rx_queue_setup(uint8_t port_id, uint16_t rx_queue_id,
uint16_t nb_rx_desc, unsigned int socket_id,
uint32_t mbp_buf_size;
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
+ struct rte_eth_rxconf local_conf;
void **rxq;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
if (rx_conf == NULL)
rx_conf = &dev_info.default_rxconf;
+ local_conf = *rx_conf;
+ if (dev->data->dev_conf.rxmode.ignore_offload_bitfield == 0) {
+ /*
+ * Reflect the port-level offloads into the queue offloads so that
+ * they are not discarded.
+ */
+ rte_eth_convert_rx_offload_bitfield(&dev->data->dev_conf.rxmode,
+ &local_conf.offloads);
+ }
+
ret = (*dev->dev_ops->rx_queue_setup)(dev, rx_queue_id, nb_rx_desc,
- socket_id, rx_conf, mp);
+ socket_id, &local_conf, mp);
if (!ret) {
if (!dev->data->min_rx_buf_size ||
dev->data->min_rx_buf_size > mbp_buf_size)
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
dev = &rte_eth_devices[port_id];
- if (dev->dev_ops->xstats_get_names_by_ids != NULL) {
- count = (*dev->dev_ops->xstats_get_names_by_ids)(dev, NULL,
+ if (dev->dev_ops->xstats_get_names_by_id != NULL) {
+ count = (*dev->dev_ops->xstats_get_names_by_id)(dev, NULL,
NULL, 0);
if (count < 0)
return count;
if (!id) {
RTE_PMD_DEBUG_TRACE("Error: id pointer is NULL\n");
- return -1;
+ return -ENOMEM;
}
if (!xstat_name) {
RTE_PMD_DEBUG_TRACE("Error: xstat_name pointer is NULL\n");
- return -1;
+ return -ENOMEM;
}
/* Get count */
- cnt_xstats = rte_eth_xstats_get_names(port_id, NULL, 0, NULL);
+ cnt_xstats = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
if (cnt_xstats < 0) {
RTE_PMD_DEBUG_TRACE("Error: Cannot get count of xstats\n");
- return -1;
+ return -ENODEV;
}
/* Get id-name lookup table */
struct rte_eth_xstat_name xstats_names[cnt_xstats];
- if (cnt_xstats != rte_eth_xstats_get_names(
+ if (cnt_xstats != rte_eth_xstats_get_names_by_id(
port_id, xstats_names, cnt_xstats, NULL)) {
RTE_PMD_DEBUG_TRACE("Error: Cannot get xstats lookup\n");
return -1;
}
int
-rte_eth_xstats_get_names_v1607(uint8_t port_id,
- struct rte_eth_xstat_name *xstats_names,
- unsigned int size)
-{
- return rte_eth_xstats_get_names(port_id, xstats_names, size, NULL);
-}
-VERSION_SYMBOL(rte_eth_xstats_get_names, _v1607, 16.07);
-
-int
-rte_eth_xstats_get_names_v1705(uint8_t port_id,
+rte_eth_xstats_get_names_by_id(uint8_t port_id,
struct rte_eth_xstat_name *xstats_names, unsigned int size,
uint64_t *ids)
{
}
}
- if (dev->dev_ops->xstats_get_names_by_ids != NULL) {
+ if (dev->dev_ops->xstats_get_names_by_id != NULL) {
/* If there are any driver-specific xstats, append them
* to end of list.
*/
cnt_driver_entries =
- (*dev->dev_ops->xstats_get_names_by_ids)(
+ (*dev->dev_ops->xstats_get_names_by_id)(
dev,
xstats_names + cnt_used_entries,
NULL,
uint16_t len, i;
struct rte_eth_xstat_name *xstats_names_copy;
- len = rte_eth_xstats_get_names_v1705(port_id, NULL, 0, NULL);
+ len = rte_eth_xstats_get_names_by_id(port_id, NULL, 0, NULL);
xstats_names_copy =
malloc(sizeof(struct rte_eth_xstat_name) * len);
return -1;
}
- rte_eth_xstats_get_names_v1705(port_id, xstats_names_copy,
+ rte_eth_xstats_get_names_by_id(port_id, xstats_names_copy,
len, NULL);
for (i = 0; i < size; i++) {
return size;
}
}
-BIND_DEFAULT_SYMBOL(rte_eth_xstats_get_names, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int
- rte_eth_xstats_get_names(uint8_t port_id,
- struct rte_eth_xstat_name *xstats_names,
- unsigned int size,
- uint64_t *ids), rte_eth_xstats_get_names_v1705);
-
-/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get_v22(uint8_t port_id, struct rte_eth_xstat *xstats,
- unsigned int n)
+rte_eth_xstats_get_names(uint8_t port_id,
+ struct rte_eth_xstat_name *xstats_names,
+ unsigned int size)
{
- uint64_t *values_copy;
- uint16_t size, i;
+ struct rte_eth_dev *dev;
+ int cnt_used_entries;
+ int cnt_expected_entries;
+ int cnt_driver_entries;
+ uint32_t idx, id_queue;
+ uint16_t num_q;
- values_copy = malloc(sizeof(values_copy) * n);
- if (!values_copy) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: Cannot allocate memory for xstats\n");
- return -1;
+ cnt_expected_entries = get_xstats_count(port_id);
+ if (xstats_names == NULL || cnt_expected_entries < 0 ||
+ (int)size < cnt_expected_entries)
+ return cnt_expected_entries;
+
+ /* port_id checked in get_xstats_count() */
+ dev = &rte_eth_devices[port_id];
+ cnt_used_entries = 0;
+
+ for (idx = 0; idx < RTE_NB_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "%s", rte_stats_strings[idx].name);
+ cnt_used_entries++;
}
- size = rte_eth_xstats_get(port_id, 0, values_copy, n);
+ num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_RXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "rx_q%u%s",
+ id_queue, rte_rxq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
- for (i = 0; i < n; i++) {
- xstats[i].id = i;
- xstats[i].value = values_copy[i];
}
- free(values_copy);
- return size;
+ num_q = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ for (id_queue = 0; id_queue < num_q; id_queue++) {
+ for (idx = 0; idx < RTE_NB_TXQ_STATS; idx++) {
+ snprintf(xstats_names[cnt_used_entries].name,
+ sizeof(xstats_names[0].name),
+ "tx_q%u%s",
+ id_queue, rte_txq_stats_strings[idx].name);
+ cnt_used_entries++;
+ }
+ }
+
+ if (dev->dev_ops->xstats_get_names != NULL) {
+ /* If there are any driver-specific xstats, append them
+ * to end of list.
+ */
+ cnt_driver_entries = (*dev->dev_ops->xstats_get_names)(
+ dev,
+ xstats_names + cnt_used_entries,
+ size - cnt_used_entries);
+ if (cnt_driver_entries < 0)
+ return cnt_driver_entries;
+ cnt_used_entries += cnt_driver_entries;
+ }
+
+ return cnt_used_entries;
}
-VERSION_SYMBOL(rte_eth_xstats_get, _v22, 2.2);
/* retrieve ethdev extended statistics */
int
-rte_eth_xstats_get_v1705(uint8_t port_id, uint64_t *ids, uint64_t *values,
+rte_eth_xstats_get_by_id(uint8_t port_id, const uint64_t *ids, uint64_t *values,
unsigned int n)
{
/* If need all xstats */
/* implemented by the driver */
- if (dev->dev_ops->xstats_get_by_ids != NULL) {
+ if (dev->dev_ops->xstats_get_by_id != NULL) {
/* Retrieve the xstats from the driver at the end of the
* xstats struct. Retrieve all xstats.
*/
- xcount = (*dev->dev_ops->xstats_get_by_ids)(dev,
+ xcount = (*dev->dev_ops->xstats_get_by_id)(dev,
NULL,
values ? values + count : NULL,
(n > count) ? n - count : 0);
uint16_t i, size;
uint64_t *values_copy;
- size = rte_eth_xstats_get_v1705(port_id, NULL, NULL, 0);
+ size = rte_eth_xstats_get_by_id(port_id, NULL, NULL, 0);
- values_copy = malloc(sizeof(values_copy) * size);
+ values_copy = malloc(sizeof(*values_copy) * size);
if (!values_copy) {
RTE_PMD_DEBUG_TRACE(
"ERROR: can't allocate memory for values_copy\n");
return -1;
}
- rte_eth_xstats_get_v1705(port_id, NULL, values_copy, size);
+ rte_eth_xstats_get_by_id(port_id, NULL, values_copy, size);
for (i = 0; i < n; i++) {
if (ids[i] >= size) {
return n;
}
}
-BIND_DEFAULT_SYMBOL(rte_eth_xstats_get, _v1705, 17.05);
-MAP_STATIC_SYMBOL(int
- rte_eth_xstats_get(uint8_t port_id, uint64_t *ids,
- uint64_t *values, unsigned int n), rte_eth_xstats_get_v1705);
-
-__rte_deprecated int
-rte_eth_xstats_get_all(uint8_t port_id, struct rte_eth_xstat *xstats,
+int
+rte_eth_xstats_get(uint8_t port_id, struct rte_eth_xstat *xstats,
unsigned int n)
{
- uint64_t *values_copy;
- uint16_t size, i;
+ struct rte_eth_stats eth_stats;
+ struct rte_eth_dev *dev;
+ unsigned int count = 0, i, q;
+ signed int xcount = 0;
+ uint64_t val, *stats_ptr;
+ uint16_t nb_rxqs, nb_txqs;
- values_copy = malloc(sizeof(values_copy) * n);
- if (!values_copy) {
- RTE_PMD_DEBUG_TRACE(
- "ERROR: Cannot allocate memory for xstats\n");
- return -1;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+
+ nb_rxqs = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+ nb_txqs = RTE_MIN(dev->data->nb_tx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+ /* Return generic statistics */
+ count = RTE_NB_STATS + (nb_rxqs * RTE_NB_RXQ_STATS) +
+ (nb_txqs * RTE_NB_TXQ_STATS);
+
+ /* implemented by the driver */
+ if (dev->dev_ops->xstats_get != NULL) {
+ /* Retrieve the xstats from the driver at the end of the
+ * xstats struct.
+ */
+ xcount = (*dev->dev_ops->xstats_get)(dev,
+ xstats ? xstats + count : NULL,
+ (n > count) ? n - count : 0);
+
+ if (xcount < 0)
+ return xcount;
}
- size = rte_eth_xstats_get(port_id, 0, values_copy, n);
- for (i = 0; i < n; i++) {
- xstats[i].id = i;
- xstats[i].value = values_copy[i];
+ if (n < count + xcount || xstats == NULL)
+ return count + xcount;
+
+ /* now fill the xstats structure */
+ count = 0;
+ rte_eth_stats_get(port_id, &eth_stats);
+
+ /* global stats */
+ for (i = 0; i < RTE_NB_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_stats_strings[i].offset);
+ val = *stats_ptr;
+ xstats[count++].value = val;
}
- free(values_copy);
- return size;
-}
-__rte_deprecated int
-rte_eth_xstats_get_names_all(uint8_t port_id,
- struct rte_eth_xstat_name *xstats_names, unsigned int n)
-{
- return rte_eth_xstats_get_names(port_id, xstats_names, n, NULL);
+ /* per-rxq stats */
+ for (q = 0; q < nb_rxqs; q++) {
+ for (i = 0; i < RTE_NB_RXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_rxq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ xstats[count++].value = val;
+ }
+ }
+
+ /* per-txq stats */
+ for (q = 0; q < nb_txqs; q++) {
+ for (i = 0; i < RTE_NB_TXQ_STATS; i++) {
+ stats_ptr = RTE_PTR_ADD(&eth_stats,
+ rte_txq_stats_strings[i].offset +
+ q * sizeof(uint64_t));
+ val = *stats_ptr;
+ xstats[count++].value = val;
+ }
+ }
+
+ for (i = 0; i < count; i++)
+ xstats[i].id = i;
+ /* add an offset to driver-specific stats */
+ for ( ; i < count + xcount; i++)
+ xstats[i].id += count;
+
+ return count + xcount;
}
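/*
 * Illustrative usage sketch, not part of this patch: the usual two-call
 * pattern against the non-versioned xstats API above; query the count with a
 * NULL array, then fetch names and values into caller-allocated buffers.
 * Assumes <stdio.h> and <stdlib.h> are available.
 */
static int
example_dump_xstats(uint8_t port_id)
{
	struct rte_eth_xstat *xstats;
	struct rte_eth_xstat_name *names;
	int len, ret, i;

	len = rte_eth_xstats_get(port_id, NULL, 0);
	if (len < 0)
		return len;

	xstats = malloc(sizeof(*xstats) * len);
	names = malloc(sizeof(*names) * len);
	if (xstats == NULL || names == NULL) {
		free(xstats);
		free(names);
		return -ENOMEM;
	}

	ret = rte_eth_xstats_get(port_id, xstats, len);
	if (ret > 0 && ret <= len &&
	    rte_eth_xstats_get_names(port_id, names, len) == ret) {
		for (i = 0; i < ret; i++)
			printf("%s: %llu\n", names[xstats[i].id].name,
			       (unsigned long long)xstats[i].value);
	}

	free(xstats);
	free(names);
	return 0;
}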
/* reset ethdev extended statistics */
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
- dev_info->driver_name = dev->data->drv_name;
+ dev_info->driver_name = dev->device->driver->name;
dev_info->nb_rx_queues = dev->data->nb_rx_queues;
dev_info->nb_tx_queues = dev->data->nb_tx_queues;
}
rte_eth_dev_vlan_filter(uint8_t port_id, uint16_t vlan_id, int on)
{
struct rte_eth_dev *dev;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (!(dev->data->dev_conf.rxmode.hw_vlan_filter)) {
+ if (!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)) {
RTE_PMD_DEBUG_TRACE("port %d: vlan-filtering disabled\n", port_id);
return -ENOSYS;
}
}
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_filter_set, -ENOTSUP);
- return (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ ret = (*dev->dev_ops->vlan_filter_set)(dev, vlan_id, on);
+ if (ret == 0) {
+ struct rte_vlan_filter_conf *vfc;
+ int vidx;
+ int vbit;
+
+ vfc = &dev->data->vlan_filter_conf;
+ vidx = vlan_id / 64;
+ vbit = vlan_id % 64;
+
+ if (on)
+ vfc->ids[vidx] |= UINT64_C(1) << vbit;
+ else
+ vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
+ }
+
+ return ret;
}
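/*
 * Illustrative note, not part of this patch: the vlan_filter_conf shadow
 * bitmap above records each allowed VLAN, e.g. vlan_id 100 sets bit
 * 100 % 64 = 36 in word 100 / 64 = 1. Filtering itself must already be
 * enabled through DEV_RX_OFFLOAD_VLAN_FILTER (or the legacy hw_vlan_filter
 * flag) at configure time, otherwise the call fails with -ENOSYS.
 */
static int
example_allow_vlan(uint8_t port_id, uint16_t vlan_id)
{
	return rte_eth_dev_vlan_filter(port_id, vlan_id, 1);
}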
int
/* check which option was changed by the application */
cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_strip = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_STRIP;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_STRIP;
mask |= ETH_VLAN_STRIP_MASK;
}
cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_filter);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_filter = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_FILTER;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_FILTER;
mask |= ETH_VLAN_FILTER_MASK;
}
cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(dev->data->dev_conf.rxmode.hw_vlan_extend);
+ org = !!(dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
- dev->data->dev_conf.rxmode.hw_vlan_extend = (uint8_t)cur;
+ if (cur)
+ dev->data->dev_conf.rxmode.offloads |=
+ DEV_RX_OFFLOAD_VLAN_EXTEND;
+ else
+ dev->data->dev_conf.rxmode.offloads &=
+ ~DEV_RX_OFFLOAD_VLAN_EXTEND;
mask |= ETH_VLAN_EXTEND_MASK;
}
return ret;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_offload_set, -ENOTSUP);
+
+ /*
+ * Convert to the offload bitfield API in case the underlying PMD
+ * still supports only that API.
+ */
+ rte_eth_convert_rx_offloads(dev->data->dev_conf.rxmode.offloads,
+ &dev->data->dev_conf.rxmode);
(*dev->dev_ops->vlan_offload_set)(dev, mask);
return ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_conf.rxmode.hw_vlan_strip)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP)
ret |= ETH_VLAN_STRIP_OFFLOAD;
- if (dev->data->dev_conf.rxmode.hw_vlan_filter)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_FILTER)
ret |= ETH_VLAN_FILTER_OFFLOAD;
- if (dev->data->dev_conf.rxmode.hw_vlan_extend)
+ if (dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_EXTEND)
ret |= ETH_VLAN_EXTEND_OFFLOAD;
return ret;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
unsigned i;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
struct rte_eth_dev *dev;
int index;
uint64_t pool_mask;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
}
/* Update NIC */
- (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
+ ret = (*dev->dev_ops->mac_addr_add)(dev, addr, index, pool);
- /* Update address in NIC data structure */
- ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+ if (ret == 0) {
+ /* Update address in NIC data structure */
+ ether_addr_copy(addr, &dev->data->mac_addrs[index]);
- /* Update pool bitmap in NIC data structure */
- dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ /* Update pool bitmap in NIC data structure */
+ dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ }
- return 0;
+ return ret;
}
int
return ret;
}
-void
+int
_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *cb_arg)
+ enum rte_eth_event_type event, void *cb_arg, void *ret_param)
{
struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
+ int rc = 0;
rte_spinlock_lock(&rte_eth_dev_cb_lock);
TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
cb_lst->active = 1;
if (cb_arg != NULL)
dev_cb.cb_arg = cb_arg;
+ if (ret_param != NULL)
+ dev_cb.ret_param = ret_param;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
- dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
- dev_cb.cb_arg);
+ rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
+ dev_cb.cb_arg, dev_cb.ret_param);
rte_spinlock_lock(&rte_eth_dev_cb_lock);
cb_lst->active = 0;
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return rc;
}
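/*
 * Illustrative callback sketch, not part of this patch: with the new
 * prototype a callback returns an int and receives a ret_param through which
 * the notifier can exchange data. Registration is unchanged:
 * rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
 * example_event_cb, NULL).
 */
static int
example_event_cb(uint8_t port_id, enum rte_eth_event_type event,
		 void *cb_arg, void *ret_param)
{
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);

	if (event == RTE_ETH_EVENT_INTR_LSC)
		printf("port %u: link status changed\n", (unsigned int)port_id);

	return 0;
}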
int
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->data->drv_name, ring_name,
+ dev->device->driver->name, ring_name,
dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
return (*dev->dev_ops->rx_queue_intr_disable)(dev, queue_id);
}
-#ifdef RTE_NIC_BYPASS
-int rte_eth_dev_bypass_init(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_init, -ENOTSUP);
- (*dev->dev_ops->bypass_init)(dev);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_show(uint8_t port_id, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_state_show)(dev, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_state_set(uint8_t port_id, uint32_t *new_state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_set, -ENOTSUP);
- (*dev->dev_ops->bypass_state_set)(dev, new_state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_show(uint8_t port_id, uint32_t event, uint32_t *state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_state_show, -ENOTSUP);
- (*dev->dev_ops->bypass_event_show)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_event_store(uint8_t port_id, uint32_t event, uint32_t state)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_event_set, -ENOTSUP);
- (*dev->dev_ops->bypass_event_set)(dev, event, state);
- return 0;
-}
-
-int
-rte_eth_dev_wd_timeout_store(uint8_t port_id, uint32_t timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_set, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_set)(dev, timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_ver_show(uint8_t port_id, uint32_t *ver)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_ver_show, -ENOTSUP);
- (*dev->dev_ops->bypass_ver_show)(dev, ver);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_timeout_show(uint8_t port_id, uint32_t *wd_timeout)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_timeout_show, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_timeout_show)(dev, wd_timeout);
- return 0;
-}
-
-int
-rte_eth_dev_bypass_wd_reset(uint8_t port_id)
-{
- struct rte_eth_dev *dev;
-
- RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
-
- dev = &rte_eth_devices[port_id];
-
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->bypass_wd_reset, -ENOTSUP);
- (*dev->dev_ops->bypass_wd_reset)(dev);
- return 0;
-}
-#endif
int
rte_eth_dev_filter_supported(uint8_t port_id, enum rte_filter_type filter_type)
-ENOTSUP);
return (*dev->dev_ops->l2_tunnel_offload_set)(dev, l2_tunnel, mask, en);
}
+
+static void
+rte_eth_dev_adjust_nb_desc(uint16_t *nb_desc,
+ const struct rte_eth_desc_lim *desc_lim)
+{
+ if (desc_lim->nb_align != 0)
+ *nb_desc = RTE_ALIGN_CEIL(*nb_desc, desc_lim->nb_align);
+
+ if (desc_lim->nb_max != 0)
+ *nb_desc = RTE_MIN(*nb_desc, desc_lim->nb_max);
+
+ *nb_desc = RTE_MAX(*nb_desc, desc_lim->nb_min);
+}
+
+int
+rte_eth_dev_adjust_nb_rx_tx_desc(uint8_t port_id,
+ uint16_t *nb_rx_desc,
+ uint16_t *nb_tx_desc)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+
+ dev = &rte_eth_devices[port_id];
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
+
+ rte_eth_dev_info_get(port_id, &dev_info);
+
+ if (nb_rx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_rx_desc, &dev_info.rx_desc_lim);
+
+ if (nb_tx_desc != NULL)
+ rte_eth_dev_adjust_nb_desc(nb_tx_desc, &dev_info.tx_desc_lim);
+
+ return 0;
+}
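/*
 * Illustrative usage sketch, not part of this patch: applications can let the
 * helper above clamp and align their requested ring sizes to the device
 * limits before queue setup. The initial sizes and mempool are placeholders.
 */
static int
example_setup_queues(uint8_t port_id, struct rte_mempool *mb_pool)
{
	uint16_t nb_rxd = 1024;
	uint16_t nb_txd = 1024;
	int ret;

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, &nb_txd);
	if (ret != 0)
		return ret;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
				     rte_eth_dev_socket_id(port_id),
				     NULL, mb_pool);
	if (ret != 0)
		return ret;

	return rte_eth_tx_queue_setup(port_id, 0, nb_txd,
				      rte_eth_dev_socket_id(port_id), NULL);
}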