static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
-static uint16_t eth_dev_last_created_port;
/* spinlock for eth device callbacks */
static rte_spinlock_t rte_eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
STAT_QMAP_RX
};
-int __rte_experimental
+int
rte_eth_iterator_init(struct rte_dev_iterator *iter, const char *devargs_str)
{
int ret;
return ret;
}
-uint16_t __rte_experimental
+uint16_t
rte_eth_iterator_next(struct rte_dev_iterator *iter)
{
if (iter->cls == NULL) /* invalid ethdev iterator */
return RTE_MAX_ETHPORTS;
}
-void __rte_experimental
+void
rte_eth_iterator_cleanup(struct rte_dev_iterator *iter)
{
if (iter->bus_str == NULL)
rte_eth_find_next(uint16_t port_id)
{
while (port_id < RTE_MAX_ETHPORTS &&
- rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
- rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED)
+ rte_eth_devices[port_id].state == RTE_ETH_DEV_UNUSED)
port_id++;
if (port_id >= RTE_MAX_ETHPORTS)
return port_id;
}
+/*
+ * Macro to iterate over all valid ports for internal usage.
+ * Note: RTE_ETH_FOREACH_DEV is different because it filters out owned ports.
+ */
+#define RTE_ETH_FOREACH_VALID_DEV(port_id) \
+ for (port_id = rte_eth_find_next(0); \
+ port_id < RTE_MAX_ETHPORTS; \
+ port_id = rte_eth_find_next(port_id + 1))
+
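A minimal usage sketch of the new internal macro (illustrative only, not part of this patch): unlike the public RTE_ETH_FOREACH_DEV, which skips ports claimed by an owner, this iterator visits every port whose state is not RTE_ETH_DEV_UNUSED.

    static void
    log_all_valid_ports(void)
    {
    	uint16_t port_id;

    	/* Visits attached and removed ports, including owned ones. */
    	RTE_ETH_FOREACH_VALID_DEV(port_id)
    		RTE_ETHDEV_LOG(INFO, "port %u is allocated\n", port_id);
    }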
+uint16_t
+rte_eth_find_next_of(uint16_t port_id, const struct rte_device *parent)
+{
+ port_id = rte_eth_find_next(port_id);
+ while (port_id < RTE_MAX_ETHPORTS &&
+ rte_eth_devices[port_id].device != parent)
+ port_id = rte_eth_find_next(port_id + 1);
+
+ return port_id;
+}
+
+uint16_t
+rte_eth_find_next_sibling(uint16_t port_id, uint16_t ref_port_id)
+{
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(ref_port_id, RTE_MAX_ETHPORTS);
+ return rte_eth_find_next_of(port_id,
+ rte_eth_devices[ref_port_id].device);
+}
+
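A short sketch of how the two lookup helpers above can be combined (illustrative only; it assumes nothing beyond the functions defined in this patch): walk every port backed by the same rte_device as a reference port, e.g. all representors of one PCI function.

    static void
    visit_siblings(uint16_t ref_port)
    {
    	uint16_t p;

    	for (p = rte_eth_find_next_sibling(0, ref_port);
    	     p < RTE_MAX_ETHPORTS;
    	     p = rte_eth_find_next_sibling(p + 1, ref_port))
    		RTE_ETHDEV_LOG(INFO, "port %u shares a device with port %u\n",
    			       p, ref_port);
    }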
static void
rte_eth_dev_shared_data_prepare(void)
{
eth_dev->data = &rte_eth_dev_shared_data->data[port_id];
- eth_dev_last_created_port = port_id;
-
return eth_dev;
}
{
uint16_t port_id;
struct rte_eth_dev *eth_dev = NULL;
+ size_t name_len;
+
+ name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
+ if (name_len == 0) {
+ RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
+ return NULL;
+ }
+
+ if (name_len >= RTE_ETH_NAME_MAX_LEN) {
+ RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
+ return NULL;
+ }
rte_eth_dev_shared_data_prepare();
}
eth_dev = eth_dev_get(port_id);
- snprintf(eth_dev->data->name, sizeof(eth_dev->data->name), "%s", name);
+ strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
eth_dev->data->port_id = port_id;
- eth_dev->data->mtu = ETHER_MTU;
+ eth_dev->data->mtu = RTE_ETHER_MTU;
unlock:
rte_spinlock_unlock(&rte_eth_dev_shared_data->ownership_lock);
uint64_t
rte_eth_find_next_owned_by(uint16_t port_id, const uint64_t owner_id)
{
+ port_id = rte_eth_find_next(port_id);
while (port_id < RTE_MAX_ETHPORTS &&
- ((rte_eth_devices[port_id].state != RTE_ETH_DEV_ATTACHED &&
- rte_eth_devices[port_id].state != RTE_ETH_DEV_REMOVED) ||
- rte_eth_devices[port_id].data->owner.id != owner_id))
- port_id++;
-
- if (port_id >= RTE_MAX_ETHPORTS)
- return RTE_MAX_ETHPORTS;
+ rte_eth_devices[port_id].data->owner.id != owner_id)
+ port_id = rte_eth_find_next(port_id + 1);
return port_id;
}
{
struct rte_eth_dev *ethdev = &rte_eth_devices[port_id];
struct rte_eth_dev_owner *port_owner;
- int sret;
if (port_id >= RTE_MAX_ETHPORTS || !is_allocated(ethdev)) {
RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
return -EPERM;
}
- sret = snprintf(port_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN, "%s",
- new_owner->name);
- if (sret < 0 || sret >= RTE_ETH_MAX_OWNER_NAME_LEN)
- RTE_ETHDEV_LOG(ERR, "Port %u owner name was truncated\n",
- port_id);
+ /* cannot truncate (both name fields have the same size) */
+ strlcpy(port_owner->name, new_owner->name, RTE_ETH_MAX_OWNER_NAME_LEN);
port_owner->id = new_owner->id;
return count;
}
-uint16_t __rte_experimental
+uint16_t
rte_eth_dev_count_total(void)
{
uint16_t port, count = 0;
- for (port = 0; port < RTE_MAX_ETHPORTS; port++)
- if (rte_eth_devices[port].state != RTE_ETH_DEV_UNUSED)
- count++;
+ RTE_ETH_FOREACH_VALID_DEV(port)
+ count++;
return count;
}
return -EINVAL;
}
- for (pid = 0; pid < RTE_MAX_ETHPORTS; pid++) {
- if (rte_eth_devices[pid].state != RTE_ETH_DEV_UNUSED &&
- !strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
+ RTE_ETH_FOREACH_VALID_DEV(pid)
+ if (!strcmp(name, rte_eth_dev_shared_data->data[pid].name)) {
*port_id = pid;
return 0;
}
- }
return -ENODEV;
}
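For context, a typical caller resolves an ethdev name to a port number as below; the device name is hypothetical and error handling is reduced to a bail-out.

    	uint16_t port_id;

    	if (rte_eth_dev_get_port_by_name("net_tap0", &port_id) != 0)
    		rte_exit(EXIT_FAILURE, "device net_tap0 not found\n");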
{
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
- struct rte_eth_conf local_conf = *dev_conf;
+ struct rte_eth_conf orig_conf;
int diag;
+ int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_configure, -ENOTSUP);
+ if (dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be stopped to allow configuration\n",
+ port_id);
+ return -EBUSY;
+ }
+
+ /* Store original config, as a rollback is required on failure */
+ memcpy(&orig_conf, &dev->data->dev_conf, sizeof(dev->data->dev_conf));
+
+ /*
+ * Copy the dev_conf parameter into the dev structure.
+ * rte_eth_dev_info_get() requires dev_conf, so copy it before getting dev_info.
+ */
+ memcpy(&dev->data->dev_conf, dev_conf, sizeof(dev->data->dev_conf));
+
rte_eth_dev_info_get(port_id, &dev_info);
/* If number of queues specified by application for both Rx and Tx is
RTE_ETHDEV_LOG(ERR,
"Number of RX queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
"Number of TX queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
- return -EINVAL;
- }
-
- if (dev->data->dev_started) {
- RTE_ETHDEV_LOG(ERR,
- "Port %u must be stopped to allow configuration\n",
- port_id);
- return -EBUSY;
+ ret = -EINVAL;
+ goto rollback;
}
- /* Copy the dev_conf parameter into the dev structure */
- memcpy(&dev->data->dev_conf, &local_conf, sizeof(dev->data->dev_conf));
-
/*
* Check that the numbers of RX and TX queues are not greater
* than the maximum number of RX and TX queues supported by the
if (nb_rx_q > dev_info.max_rx_queues) {
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_rx_queues=%u > %u\n",
port_id, nb_rx_q, dev_info.max_rx_queues);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if (nb_tx_q > dev_info.max_tx_queues) {
RTE_ETHDEV_LOG(ERR, "Ethdev port_id=%u nb_tx_queues=%u > %u\n",
port_id, nb_tx_q, dev_info.max_tx_queues);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/* Check that the device supports requested interrupts */
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC))) {
RTE_ETHDEV_LOG(ERR, "Driver %s does not support lsc\n",
dev->device->driver->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
if ((dev_conf->intr_conf.rmv == 1) &&
(!(dev->data->dev_flags & RTE_ETH_DEV_INTR_RMV))) {
RTE_ETHDEV_LOG(ERR, "Driver %s does not support rmv\n",
dev->device->driver->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/*
* If jumbo frames are enabled, check that the maximum RX packet
* length is supported by the configured device.
*/
- if (local_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
+ if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) {
if (dev_conf->rxmode.max_rx_pkt_len > dev_info.max_rx_pktlen) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u max_rx_pkt_len %u > max valid value %u\n",
port_id, dev_conf->rxmode.max_rx_pkt_len,
dev_info.max_rx_pktlen);
- return -EINVAL;
- } else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ ret = -EINVAL;
+ goto rollback;
+ } else if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u max_rx_pkt_len %u < min valid value %u\n",
port_id, dev_conf->rxmode.max_rx_pkt_len,
- (unsigned)ETHER_MIN_LEN);
- return -EINVAL;
+ (unsigned int)RTE_ETHER_MIN_LEN);
+ ret = -EINVAL;
+ goto rollback;
}
} else {
- if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN ||
- dev_conf->rxmode.max_rx_pkt_len > ETHER_MAX_LEN)
+ if (dev_conf->rxmode.max_rx_pkt_len < RTE_ETHER_MIN_LEN ||
+ dev_conf->rxmode.max_rx_pkt_len > RTE_ETHER_MAX_LEN)
/* Use default value */
dev->data->dev_conf.rxmode.max_rx_pkt_len =
- ETHER_MAX_LEN;
+ RTE_ETHER_MAX_LEN;
}
/* Any requested offloading must be within its device capabilities */
- if ((local_conf.rxmode.offloads & dev_info.rx_offload_capa) !=
- local_conf.rxmode.offloads) {
+ if ((dev_conf->rxmode.offloads & dev_info.rx_offload_capa) !=
+ dev_conf->rxmode.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u requested Rx offloads 0x%"PRIx64" doesn't match Rx offloads "
"capabilities 0x%"PRIx64" in %s()\n",
- port_id, local_conf.rxmode.offloads,
+ port_id, dev_conf->rxmode.offloads,
dev_info.rx_offload_capa,
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
- if ((local_conf.txmode.offloads & dev_info.tx_offload_capa) !=
- local_conf.txmode.offloads) {
+ if ((dev_conf->txmode.offloads & dev_info.tx_offload_capa) !=
+ dev_conf->txmode.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u requested Tx offloads 0x%"PRIx64" doesn't match Tx offloads "
"capabilities 0x%"PRIx64" in %s()\n",
- port_id, local_conf.txmode.offloads,
+ port_id, dev_conf->txmode.offloads,
dev_info.tx_offload_capa,
__func__);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/* Check that device supports requested rss hash functions. */
"Ethdev port_id=%u invalid rss_hf: 0x%"PRIx64", valid value: 0x%"PRIx64"\n",
port_id, dev_conf->rx_adv_conf.rss_conf.rss_hf,
dev_info.flow_type_rss_offloads);
- return -EINVAL;
+ ret = -EINVAL;
+ goto rollback;
}
/*
RTE_ETHDEV_LOG(ERR,
"Port%u rte_eth_dev_rx_queue_config = %d\n",
port_id, diag);
- return diag;
+ ret = diag;
+ goto rollback;
}
diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
"Port%u rte_eth_dev_tx_queue_config = %d\n",
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
- return diag;
+ ret = diag;
+ goto rollback;
}
diag = (*dev->dev_ops->dev_configure)(dev);
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return eth_err(port_id, diag);
+ ret = eth_err(port_id, diag);
+ goto rollback;
}
/* Initialize Rx profiling if enabled at compilation time. */
port_id, diag);
rte_eth_dev_rx_queue_config(dev, 0);
rte_eth_dev_tx_queue_config(dev, 0);
- return eth_err(port_id, diag);
+ ret = eth_err(port_id, diag);
+ goto rollback;
}
return 0;
+
+rollback:
+ memcpy(&dev->data->dev_conf, &orig_conf, sizeof(dev->data->dev_conf));
+
+ return ret;
}
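A caller-side sketch of the behaviour the rollback above guarantees (queue counts and the offload flag are illustrative): if any validation or driver callback fails, dev->data->dev_conf is restored, so the application can simply retry with corrected parameters.

    #include <rte_ethdev.h>

    static int
    configure_port(uint16_t port_id)
    {
    	struct rte_eth_conf conf = {
    		.rxmode = { .offloads = DEV_RX_OFFLOAD_CHECKSUM },
    	};
    	int ret;

    	/* The port must be stopped, otherwise -EBUSY is returned. */
    	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
    	if (ret < 0) {
    		/* dev_conf was rolled back; retry without the offload. */
    		conf.rxmode.offloads = 0;
    		ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
    	}
    	return ret;
    }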
void
rte_eth_dev_mac_restore(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
- struct ether_addr *addr;
+ struct rte_ether_addr *addr;
uint16_t i;
uint32_t pool = 0;
uint64_t pool_mask;
addr = &dev->data->mac_addrs[i];
/* skip zero address */
- if (is_zero_ether_addr(addr))
+ if (rte_is_zero_ether_addr(addr))
continue;
pool = 0;
return -EINVAL;
}
+ if (mp == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Invalid null mempool pointer\n");
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_setup, -ENOTSUP);
nb_rx_desc % dev_info.rx_desc_lim.nb_align != 0) {
RTE_ETHDEV_LOG(ERR,
- "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n",
+ "Invalid value for nb_rx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
nb_rx_desc, dev_info.rx_desc_lim.nb_max,
dev_info.rx_desc_lim.nb_min,
dev_info.rx_desc_lim.nb_align);
local_conf.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%d rx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
- "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
port_id, rx_queue_id, local_conf.offloads,
dev_info.rx_queue_offload_capa,
__func__);
nb_tx_desc < dev_info.tx_desc_lim.nb_min ||
nb_tx_desc % dev_info.tx_desc_lim.nb_align != 0) {
RTE_ETHDEV_LOG(ERR,
- "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, = %hu, and a product of %hu\n",
+ "Invalid value for nb_tx_desc(=%hu), should be: <= %hu, >= %hu, and a product of %hu\n",
nb_tx_desc, dev_info.tx_desc_lim.nb_max,
dev_info.tx_desc_lim.nb_min,
dev_info.tx_desc_lim.nb_align);
local_conf.offloads) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%d tx_queue_id=%d, new added offloads 0x%"PRIx64" must be "
- "within pre-queue offload capabilities 0x%"PRIx64" in %s()\n",
+ "within per-queue offload capabilities 0x%"PRIx64" in %s()\n",
port_id, tx_queue_id, local_conf.offloads,
dev_info.tx_queue_offload_capa,
__func__);
uint16_t num_q;
for (idx = 0; idx < RTE_NB_STATS; idx++) {
- snprintf(xstats_names[cnt_used_entries].name,
- sizeof(xstats_names[0].name),
- "%s", rte_stats_strings[idx].name);
+ strlcpy(xstats_names[cnt_used_entries].name,
+ rte_stats_strings[idx].name,
+ sizeof(xstats_names[0].name));
cnt_used_entries++;
}
num_q = RTE_MIN(dev->data->nb_rx_queues, RTE_ETHDEV_QUEUE_STAT_CNTRS);
.nb_max = UINT16_MAX,
.nb_min = 0,
.nb_align = 1,
+ .nb_seg_max = UINT16_MAX,
+ .nb_mtu_seg_max = UINT16_MAX,
};
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
dev_info->device = dev->device;
+ dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
}
void
-rte_eth_macaddr_get(uint16_t port_id, struct ether_addr *mac_addr)
+rte_eth_macaddr_get(uint16_t port_id, struct rte_ether_addr *mac_addr)
{
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_RET(port_id);
dev = &rte_eth_devices[port_id];
- ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
+ rte_ether_addr_copy(&dev->data->mac_addrs[0], mac_addr);
}
rte_eth_dev_set_mtu(uint16_t port_id, uint16_t mtu)
{
int ret;
+ struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mtu_set, -ENOTSUP);
+ /*
+ * Check if the device supports dev_infos_get; if it does not,
+ * skip the min_mtu/max_mtu validation here, as it requires values
+ * that are only populated by rte_eth_dev_info_get(), which in turn
+ * relies on dev->dev_ops->dev_infos_get.
+ */
+ if (*dev->dev_ops->dev_infos_get != NULL) {
+ rte_eth_dev_info_get(port_id, &dev_info);
+ if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
+ return -EINVAL;
+ }
+
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
if (!ret)
dev->data->mtu = mtu;
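With min_mtu/max_mtu now reported in rte_eth_dev_info, a caller can clamp a requested MTU before calling rte_eth_dev_set_mtu(); a sketch, assuming the driver implements dev_infos_get:

    static int
    set_mtu_clamped(uint16_t port_id, uint16_t mtu)
    {
    	struct rte_eth_dev_info dev_info;

    	rte_eth_dev_info_get(port_id, &dev_info);
    	if (mtu < dev_info.min_mtu)
    		mtu = dev_info.min_mtu;
    	else if (mtu > dev_info.max_mtu)
    		mtu = dev_info.max_mtu;

    	return rte_eth_dev_set_mtu(port_id, mtu);
    }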
* an empty spot.
*/
static int
-get_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
+get_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
rte_eth_dev_info_get(port_id, &dev_info);
for (i = 0; i < dev_info.max_mac_addrs; i++)
- if (memcmp(addr, &dev->data->mac_addrs[i], ETHER_ADDR_LEN) == 0)
+ if (memcmp(addr, &dev->data->mac_addrs[i],
+ RTE_ETHER_ADDR_LEN) == 0)
return i;
return -1;
}
-static const struct ether_addr null_mac_addr;
+static const struct rte_ether_addr null_mac_addr;
int
-rte_eth_dev_mac_addr_add(uint16_t port_id, struct ether_addr *addr,
+rte_eth_dev_mac_addr_add(uint16_t port_id, struct rte_ether_addr *addr,
uint32_t pool)
{
struct rte_eth_dev *dev;
dev = &rte_eth_devices[port_id];
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->mac_addr_add, -ENOTSUP);
- if (is_zero_ether_addr(addr)) {
+ if (rte_is_zero_ether_addr(addr)) {
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
if (ret == 0) {
/* Update address in NIC data structure */
- ether_addr_copy(addr, &dev->data->mac_addrs[index]);
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
/* Update pool bitmap in NIC data structure */
dev->data->mac_pool_sel[index] |= (1ULL << pool);
}
int
-rte_eth_dev_mac_addr_remove(uint16_t port_id, struct ether_addr *addr)
+rte_eth_dev_mac_addr_remove(uint16_t port_id, struct rte_ether_addr *addr)
{
struct rte_eth_dev *dev;
int index;
(*dev->dev_ops->mac_addr_remove)(dev, index);
/* Update address in NIC data structure */
- ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
+ rte_ether_addr_copy(&null_mac_addr, &dev->data->mac_addrs[index]);
/* reset pool bitmap */
dev->data->mac_pool_sel[index] = 0;
}
int
-rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct ether_addr *addr)
+rte_eth_dev_default_mac_addr_set(uint16_t port_id, struct rte_ether_addr *addr)
{
struct rte_eth_dev *dev;
int ret;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
- if (!is_valid_assigned_ether_addr(addr))
+ if (!rte_is_valid_assigned_ether_addr(addr))
return -EINVAL;
dev = &rte_eth_devices[port_id];
return ret;
/* Update default address in NIC data structure */
- ether_addr_copy(addr, &dev->data->mac_addrs[0]);
+ rte_ether_addr_copy(addr, &dev->data->mac_addrs[0]);
return 0;
}
* an empty spot.
*/
static int
-get_hash_mac_addr_index(uint16_t port_id, const struct ether_addr *addr)
+get_hash_mac_addr_index(uint16_t port_id, const struct rte_ether_addr *addr)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev = &rte_eth_devices[port_id];
for (i = 0; i < dev_info.max_hash_mac_addrs; i++)
if (memcmp(addr, &dev->data->hash_mac_addrs[i],
- ETHER_ADDR_LEN) == 0)
+ RTE_ETHER_ADDR_LEN) == 0)
return i;
return -1;
}
int
-rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct ether_addr *addr,
+rte_eth_dev_uc_hash_table_set(uint16_t port_id, struct rte_ether_addr *addr,
uint8_t on)
{
int index;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (is_zero_ether_addr(addr)) {
+ if (rte_is_zero_ether_addr(addr)) {
RTE_ETHDEV_LOG(ERR, "Port %u: Cannot add NULL MAC address\n",
port_id);
return -EINVAL;
if (ret == 0) {
/* Update address in NIC data structure */
if (on)
- ether_addr_copy(addr,
+ rte_ether_addr_copy(addr,
&dev->data->hash_mac_addrs[index]);
else
- ether_addr_copy(&null_mac_addr,
+ rte_ether_addr_copy(&null_mac_addr,
&dev->data->hash_mac_addrs[index]);
}
{
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
+ int rc;
- snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
- dev->data->port_id, queue_id, ring_name);
+ rc = snprintf(z_name, sizeof(z_name), "eth_p%d_q%d_%s",
+ dev->data->port_id, queue_id, ring_name);
+ if (rc >= RTE_MEMZONE_NAMESIZE) {
+ RTE_ETHDEV_LOG(ERR, "ring name too long\n");
+ rte_errno = ENAMETOOLONG;
+ return NULL;
+ }
mz = rte_memzone_lookup(z_name);
if (mz)
return -ENODEV;
RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
- if (ethdev_uninit) {
- ret = ethdev_uninit(ethdev);
- if (ret)
- return ret;
- }
+
+ ret = ethdev_uninit(ethdev);
+ if (ret)
+ return ret;
return rte_eth_dev_release_port(ethdev);
}
int
rte_eth_dev_set_mc_addr_list(uint16_t port_id,
- struct ether_addr *mc_addr_set,
+ struct rte_ether_addr *mc_addr_set,
uint32_t nb_mc_addr)
{
struct rte_eth_dev *dev;
timestamp));
}
+int
+rte_eth_read_clock(uint16_t port_id, uint64_t *clock)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->read_clock, -ENOTSUP);
+ return eth_err(port_id, (*dev->dev_ops->read_clock)(dev, clock));
+}
+
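A usage sketch for the new rte_eth_read_clock(): sampling the raw device clock twice around a known delay yields an estimate of its frequency, which applications can use to convert hardware timestamps (the 100 ms delay and the helper name are illustrative).

    #include <rte_cycles.h>

    static int
    estimate_dev_clock_hz(uint16_t port_id, uint64_t *hz)
    {
    	uint64_t start, end;
    	int ret;

    	ret = rte_eth_read_clock(port_id, &start);
    	if (ret < 0)
    		return ret;	/* e.g. -ENOTSUP if the driver lacks read_clock */

    	rte_delay_ms(100);

    	ret = rte_eth_read_clock(port_id, &end);
    	if (ret < 0)
    		return ret;

    	*hz = (end - start) * 10;	/* ticks per 100 ms -> ticks per second */
    	return 0;
    }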
int
rte_eth_dev_get_reg_info(uint16_t port_id, struct rte_dev_reg_info *info)
{