#include <rte_errno.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
-#include <rte_kvargs.h>
#include <rte_class.h>
#include <rte_ether.h>
#include <rte_telemetry.h>
#include "ethdev_profile.h"
#include "ethdev_private.h"
-static const char *MZ_RTE_ETH_DEV_DATA = "rte_eth_dev_data";
struct rte_eth_dev rte_eth_devices[RTE_MAX_ETHPORTS];
/* public fast-path API */
struct rte_eth_fp_ops rte_eth_fp_ops[RTE_MAX_ETHPORTS];
-/* spinlock for eth device callbacks */
-static rte_spinlock_t eth_dev_cb_lock = RTE_SPINLOCK_INITIALIZER;
-
-/* spinlock for add/remove rx callbacks */
+/* spinlock for add/remove Rx callbacks */
static rte_spinlock_t eth_dev_rx_cb_lock = RTE_SPINLOCK_INITIALIZER;
-/* spinlock for add/remove tx callbacks */
+/* spinlock for add/remove Tx callbacks */
static rte_spinlock_t eth_dev_tx_cb_lock = RTE_SPINLOCK_INITIALIZER;
-/* spinlock for shared data allocation */
-static rte_spinlock_t eth_dev_shared_data_lock = RTE_SPINLOCK_INITIALIZER;
-
/* store statistics names and their offsets in the stats structure */
struct rte_eth_xstats_name_off {
char name[RTE_ETH_XSTATS_NAME_SIZE];
unsigned offset;
};
-/* Shared memory between primary and secondary processes. */
-static struct {
- uint64_t next_owner_id;
- rte_spinlock_t ownership_lock;
- struct rte_eth_dev_data data[RTE_MAX_ETHPORTS];
-} *eth_dev_shared_data;
-
static const struct rte_eth_xstats_name_off eth_dev_stats_strings[] = {
{"rx_good_packets", offsetof(struct rte_eth_stats, ipackets)},
{"tx_good_packets", offsetof(struct rte_eth_stats, opackets)},
#define RTE_NB_TXQ_STATS RTE_DIM(eth_dev_txq_stats_strings)
#define RTE_RX_OFFLOAD_BIT2STR(_name) \
- { DEV_RX_OFFLOAD_##_name, #_name }
-
-#define RTE_ETH_RX_OFFLOAD_BIT2STR(_name) \
{ RTE_ETH_RX_OFFLOAD_##_name, #_name }
static const struct {
RTE_RX_OFFLOAD_BIT2STR(HEADER_SPLIT),
RTE_RX_OFFLOAD_BIT2STR(VLAN_FILTER),
RTE_RX_OFFLOAD_BIT2STR(VLAN_EXTEND),
- RTE_RX_OFFLOAD_BIT2STR(JUMBO_FRAME),
RTE_RX_OFFLOAD_BIT2STR(SCATTER),
RTE_RX_OFFLOAD_BIT2STR(TIMESTAMP),
RTE_RX_OFFLOAD_BIT2STR(SECURITY),
RTE_RX_OFFLOAD_BIT2STR(SCTP_CKSUM),
RTE_RX_OFFLOAD_BIT2STR(OUTER_UDP_CKSUM),
RTE_RX_OFFLOAD_BIT2STR(RSS_HASH),
- RTE_ETH_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
+ RTE_RX_OFFLOAD_BIT2STR(BUFFER_SPLIT),
};
#undef RTE_RX_OFFLOAD_BIT2STR
-#undef RTE_ETH_RX_OFFLOAD_BIT2STR
#define RTE_TX_OFFLOAD_BIT2STR(_name) \
- { DEV_TX_OFFLOAD_##_name, #_name }
+ { RTE_ETH_TX_OFFLOAD_##_name, #_name }
static const struct {
uint64_t offload;
#undef RTE_TX_OFFLOAD_BIT2STR
-/**
- * The user application callback description.
- *
- * It contains callback address to be registered by user application,
- * the pointer to the parameters for callback, and the event type.
- */
-struct rte_eth_dev_callback {
- TAILQ_ENTRY(rte_eth_dev_callback) next; /**< Callbacks list */
- rte_eth_dev_cb_fn cb_fn; /**< Callback address */
- void *cb_arg; /**< Parameter for callback */
- void *ret_param; /**< Return parameter */
- enum rte_eth_event_type event; /**< Interrupt event type */
- uint32_t active; /**< Callback is executing */
+static const struct {
+ uint64_t offload;
+ const char *name;
+} rte_eth_dev_capa_names[] = {
+ {RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP, "RUNTIME_RX_QUEUE_SETUP"},
+ {RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP, "RUNTIME_TX_QUEUE_SETUP"},
+ {RTE_ETH_DEV_CAPA_RXQ_SHARE, "RXQ_SHARE"},
+ {RTE_ETH_DEV_CAPA_FLOW_RULE_KEEP, "FLOW_RULE_KEEP"},
+ {RTE_ETH_DEV_CAPA_FLOW_SHARED_OBJECT_KEEP, "FLOW_SHARED_OBJECT_KEEP"},
};
enum {
rte_eth_devices[ref_port_id].device);
}
-static void
-eth_dev_shared_data_prepare(void)
-{
- const unsigned flags = 0;
- const struct rte_memzone *mz;
-
- rte_spinlock_lock(&eth_dev_shared_data_lock);
-
- if (eth_dev_shared_data == NULL) {
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- /* Allocate port data and ownership shared memory. */
- mz = rte_memzone_reserve(MZ_RTE_ETH_DEV_DATA,
- sizeof(*eth_dev_shared_data),
- rte_socket_id(), flags);
- } else
- mz = rte_memzone_lookup(MZ_RTE_ETH_DEV_DATA);
- if (mz == NULL)
- rte_panic("Cannot allocate ethdev shared data\n");
-
- eth_dev_shared_data = mz->addr;
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- eth_dev_shared_data->next_owner_id =
- RTE_ETH_DEV_NO_OWNER + 1;
- rte_spinlock_init(&eth_dev_shared_data->ownership_lock);
- memset(eth_dev_shared_data->data, 0,
- sizeof(eth_dev_shared_data->data));
- }
- }
-
- rte_spinlock_unlock(&eth_dev_shared_data_lock);
-}
-
static bool
eth_dev_is_allocated(const struct rte_eth_dev *ethdev)
{
return ethdev->data->name[0] != '\0';
}
-static struct rte_eth_dev *
-eth_dev_allocated(const char *name)
-{
- uint16_t i;
-
- RTE_BUILD_BUG_ON(RTE_MAX_ETHPORTS >= UINT16_MAX);
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (rte_eth_devices[i].data != NULL &&
- strcmp(rte_eth_devices[i].data->name, name) == 0)
- return &rte_eth_devices[i];
- }
- return NULL;
-}
-
-struct rte_eth_dev *
-rte_eth_dev_allocated(const char *name)
-{
- struct rte_eth_dev *ethdev;
-
- eth_dev_shared_data_prepare();
-
- rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
-
- ethdev = eth_dev_allocated(name);
-
- rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
-
- return ethdev;
-}
-
-static uint16_t
-eth_dev_find_free_port(void)
-{
- uint16_t i;
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- /* Using shared name field to find a free port. */
- if (eth_dev_shared_data->data[i].name[0] == '\0') {
- RTE_ASSERT(rte_eth_devices[i].state ==
- RTE_ETH_DEV_UNUSED);
- return i;
- }
- }
- return RTE_MAX_ETHPORTS;
-}
-
-static struct rte_eth_dev *
-eth_dev_get(uint16_t port_id)
-{
- struct rte_eth_dev *eth_dev = &rte_eth_devices[port_id];
-
- eth_dev->data = ð_dev_shared_data->data[port_id];
-
- return eth_dev;
-}
-
-struct rte_eth_dev *
-rte_eth_dev_allocate(const char *name)
-{
- uint16_t port_id;
- struct rte_eth_dev *eth_dev = NULL;
- size_t name_len;
-
- name_len = strnlen(name, RTE_ETH_NAME_MAX_LEN);
- if (name_len == 0) {
- RTE_ETHDEV_LOG(ERR, "Zero length Ethernet device name\n");
- return NULL;
- }
-
- if (name_len >= RTE_ETH_NAME_MAX_LEN) {
- RTE_ETHDEV_LOG(ERR, "Ethernet device name is too long\n");
- return NULL;
- }
-
- eth_dev_shared_data_prepare();
-
- /* Synchronize port creation between primary and secondary threads. */
- rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
-
- if (eth_dev_allocated(name) != NULL) {
- RTE_ETHDEV_LOG(ERR,
- "Ethernet device with name %s already allocated\n",
- name);
- goto unlock;
- }
-
- port_id = eth_dev_find_free_port();
- if (port_id == RTE_MAX_ETHPORTS) {
- RTE_ETHDEV_LOG(ERR,
- "Reached maximum number of Ethernet ports\n");
- goto unlock;
- }
-
- eth_dev = eth_dev_get(port_id);
- strlcpy(eth_dev->data->name, name, sizeof(eth_dev->data->name));
- eth_dev->data->port_id = port_id;
- eth_dev->data->backer_port_id = RTE_MAX_ETHPORTS;
- eth_dev->data->mtu = RTE_ETHER_MTU;
- pthread_mutex_init(&eth_dev->data->flow_ops_mutex, NULL);
-
-unlock:
- rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
-
- return eth_dev;
-}
-
-/*
- * Attach to a port already registered by the primary process, which
- * makes sure that the same device would have the same port id both
- * in the primary and secondary process.
- */
-struct rte_eth_dev *
-rte_eth_dev_attach_secondary(const char *name)
-{
- uint16_t i;
- struct rte_eth_dev *eth_dev = NULL;
-
- eth_dev_shared_data_prepare();
-
- /* Synchronize port attachment to primary port creation and release. */
- rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (strcmp(eth_dev_shared_data->data[i].name, name) == 0)
- break;
- }
- if (i == RTE_MAX_ETHPORTS) {
- RTE_ETHDEV_LOG(ERR,
- "Device %s is not driven by the primary process\n",
- name);
- } else {
- eth_dev = eth_dev_get(i);
- RTE_ASSERT(eth_dev->data->port_id == i);
- }
-
- rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
- return eth_dev;
-}
-
-int
-rte_eth_dev_release_port(struct rte_eth_dev *eth_dev)
-{
- if (eth_dev == NULL)
- return -EINVAL;
-
- eth_dev_shared_data_prepare();
-
- if (eth_dev->state != RTE_ETH_DEV_UNUSED)
- rte_eth_dev_callback_process(eth_dev,
- RTE_ETH_EVENT_DESTROY, NULL);
-
- eth_dev_fp_ops_reset(rte_eth_fp_ops + eth_dev->data->port_id);
-
- rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
-
- eth_dev->state = RTE_ETH_DEV_UNUSED;
- eth_dev->device = NULL;
- eth_dev->process_private = NULL;
- eth_dev->intr_handle = NULL;
- eth_dev->rx_pkt_burst = NULL;
- eth_dev->tx_pkt_burst = NULL;
- eth_dev->tx_pkt_prepare = NULL;
- eth_dev->rx_queue_count = NULL;
- eth_dev->rx_descriptor_status = NULL;
- eth_dev->tx_descriptor_status = NULL;
- eth_dev->dev_ops = NULL;
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- rte_free(eth_dev->data->rx_queues);
- rte_free(eth_dev->data->tx_queues);
- rte_free(eth_dev->data->mac_addrs);
- rte_free(eth_dev->data->hash_mac_addrs);
- rte_free(eth_dev->data->dev_private);
- pthread_mutex_destroy(&eth_dev->data->flow_ops_mutex);
- memset(eth_dev->data, 0, sizeof(struct rte_eth_dev_data));
- }
-
- rte_spinlock_unlock(&eth_dev_shared_data->ownership_lock);
-
- return 0;
-}
-
int
rte_eth_dev_is_valid_port(uint16_t port_id)
{
struct rte_eth_dev_owner *port_owner;
if (port_id >= RTE_MAX_ETHPORTS || !eth_dev_is_allocated(ethdev)) {
- RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
port_id);
return -ENODEV;
}
rte_spinlock_lock(&eth_dev_shared_data->ownership_lock);
if (eth_is_valid_owner_id(owner_id)) {
- for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++)
- if (rte_eth_devices[port_id].data->owner.id == owner_id)
- memset(&rte_eth_devices[port_id].data->owner, 0,
+ for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
+ struct rte_eth_dev_data *data =
+ rte_eth_devices[port_id].data;
+ if (data != NULL && data->owner.id == owner_id)
+ memset(&data->owner, 0,
sizeof(struct rte_eth_dev_owner));
+ }
RTE_ETHDEV_LOG(NOTICE,
"All port owners owned by %016"PRIx64" identifier have removed\n",
owner_id);
} else {
RTE_ETHDEV_LOG(ERR,
- "Invalid owner id=%016"PRIx64"\n",
+ "Invalid owner ID=%016"PRIx64"\n",
owner_id);
ret = -EINVAL;
}
ethdev = &rte_eth_devices[port_id];
if (!eth_dev_is_allocated(ethdev)) {
- RTE_ETHDEV_LOG(ERR, "Port id %"PRIu16" is not allocated\n",
+ RTE_ETHDEV_LOG(ERR, "Port ID %"PRIu16" is not allocated\n",
port_id);
return -ENODEV;
}
return ret;
}
-static void
-eth_dev_rxq_release(struct rte_eth_dev *dev, uint16_t qid)
-{
- void **rxq = dev->data->rx_queues;
-
- if (rxq[qid] == NULL)
- return;
-
- if (dev->dev_ops->rx_queue_release != NULL)
- (*dev->dev_ops->rx_queue_release)(dev, qid);
- rxq[qid] = NULL;
-}
-
-static void
-eth_dev_txq_release(struct rte_eth_dev *dev, uint16_t qid)
-{
- void **txq = dev->data->tx_queues;
-
- if (txq[qid] == NULL)
- return;
-
- if (dev->dev_ops->tx_queue_release != NULL)
- (*dev->dev_ops->tx_queue_release)(dev, qid);
- txq[qid] = NULL;
-}
-
-static int
-eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
- uint16_t old_nb_queues = dev->data->nb_rx_queues;
- unsigned i;
-
- if (dev->data->rx_queues == NULL && nb_queues != 0) { /* first time configuration */
- dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
- sizeof(dev->data->rx_queues[0]) *
- RTE_MAX_QUEUES_PER_PORT,
- RTE_CACHE_LINE_SIZE);
- if (dev->data->rx_queues == NULL) {
- dev->data->nb_rx_queues = 0;
- return -(ENOMEM);
- }
- } else if (dev->data->rx_queues != NULL && nb_queues != 0) { /* re-configure */
- for (i = nb_queues; i < old_nb_queues; i++)
- eth_dev_rxq_release(dev, i);
-
- } else if (dev->data->rx_queues != NULL && nb_queues == 0) {
- for (i = nb_queues; i < old_nb_queues; i++)
- eth_dev_rxq_release(dev, i);
-
- rte_free(dev->data->rx_queues);
- dev->data->rx_queues = NULL;
- }
- dev->data->nb_rx_queues = nb_queues;
- return 0;
-}
-
static int
eth_dev_validate_rx_queue(const struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
return eth_err(port_id, dev->dev_ops->tx_queue_stop(dev, tx_queue_id));
}
-static int
-eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
-{
- uint16_t old_nb_queues = dev->data->nb_tx_queues;
- unsigned i;
-
- if (dev->data->tx_queues == NULL && nb_queues != 0) { /* first time configuration */
- dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
- sizeof(dev->data->tx_queues[0]) *
- RTE_MAX_QUEUES_PER_PORT,
- RTE_CACHE_LINE_SIZE);
- if (dev->data->tx_queues == NULL) {
- dev->data->nb_tx_queues = 0;
- return -(ENOMEM);
- }
- } else if (dev->data->tx_queues != NULL && nb_queues != 0) { /* re-configure */
- for (i = nb_queues; i < old_nb_queues; i++)
- eth_dev_txq_release(dev, i);
-
- } else if (dev->data->tx_queues != NULL && nb_queues == 0) {
- for (i = nb_queues; i < old_nb_queues; i++)
- eth_dev_txq_release(dev, i);
-
- rte_free(dev->data->tx_queues);
- dev->data->tx_queues = NULL;
- }
- dev->data->nb_tx_queues = nb_queues;
- return 0;
-}
-
uint32_t
rte_eth_speed_bitflag(uint32_t speed, int duplex)
{
switch (speed) {
- case ETH_SPEED_NUM_10M:
- return duplex ? ETH_LINK_SPEED_10M : ETH_LINK_SPEED_10M_HD;
- case ETH_SPEED_NUM_100M:
- return duplex ? ETH_LINK_SPEED_100M : ETH_LINK_SPEED_100M_HD;
- case ETH_SPEED_NUM_1G:
- return ETH_LINK_SPEED_1G;
- case ETH_SPEED_NUM_2_5G:
- return ETH_LINK_SPEED_2_5G;
- case ETH_SPEED_NUM_5G:
- return ETH_LINK_SPEED_5G;
- case ETH_SPEED_NUM_10G:
- return ETH_LINK_SPEED_10G;
- case ETH_SPEED_NUM_20G:
- return ETH_LINK_SPEED_20G;
- case ETH_SPEED_NUM_25G:
- return ETH_LINK_SPEED_25G;
- case ETH_SPEED_NUM_40G:
- return ETH_LINK_SPEED_40G;
- case ETH_SPEED_NUM_50G:
- return ETH_LINK_SPEED_50G;
- case ETH_SPEED_NUM_56G:
- return ETH_LINK_SPEED_56G;
- case ETH_SPEED_NUM_100G:
- return ETH_LINK_SPEED_100G;
- case ETH_SPEED_NUM_200G:
- return ETH_LINK_SPEED_200G;
+ case RTE_ETH_SPEED_NUM_10M:
+ return duplex ? RTE_ETH_LINK_SPEED_10M : RTE_ETH_LINK_SPEED_10M_HD;
+ case RTE_ETH_SPEED_NUM_100M:
+ return duplex ? RTE_ETH_LINK_SPEED_100M : RTE_ETH_LINK_SPEED_100M_HD;
+ case RTE_ETH_SPEED_NUM_1G:
+ return RTE_ETH_LINK_SPEED_1G;
+ case RTE_ETH_SPEED_NUM_2_5G:
+ return RTE_ETH_LINK_SPEED_2_5G;
+ case RTE_ETH_SPEED_NUM_5G:
+ return RTE_ETH_LINK_SPEED_5G;
+ case RTE_ETH_SPEED_NUM_10G:
+ return RTE_ETH_LINK_SPEED_10G;
+ case RTE_ETH_SPEED_NUM_20G:
+ return RTE_ETH_LINK_SPEED_20G;
+ case RTE_ETH_SPEED_NUM_25G:
+ return RTE_ETH_LINK_SPEED_25G;
+ case RTE_ETH_SPEED_NUM_40G:
+ return RTE_ETH_LINK_SPEED_40G;
+ case RTE_ETH_SPEED_NUM_50G:
+ return RTE_ETH_LINK_SPEED_50G;
+ case RTE_ETH_SPEED_NUM_56G:
+ return RTE_ETH_LINK_SPEED_56G;
+ case RTE_ETH_SPEED_NUM_100G:
+ return RTE_ETH_LINK_SPEED_100G;
+ case RTE_ETH_SPEED_NUM_200G:
+ return RTE_ETH_LINK_SPEED_200G;
default:
return 0;
}
return name;
}
+const char *
+rte_eth_dev_capability_name(uint64_t capability)
+{
+ const char *name = "UNKNOWN";
+ unsigned int i;
+
+ for (i = 0; i < RTE_DIM(rte_eth_dev_capa_names); ++i) {
+ if (capability == rte_eth_dev_capa_names[i].offload) {
+ name = rte_eth_dev_capa_names[i].name;
+ break;
+ }
+ }
+
+ return name;
+}
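/*
 * Editorial sketch, not part of the patch: one way an application might
 * print the capability names exposed above. example_print_dev_capa() is a
 * hypothetical helper; it assumes a valid port_id and that <stdio.h> is
 * available in the including file.
 */
static void
example_print_dev_capa(uint16_t port_id)
{
	struct rte_eth_dev_info dev_info;
	uint64_t capa;

	if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
		return;
	/* Clear the lowest set bit on each iteration. */
	for (capa = dev_info.dev_capa; capa != 0; capa &= capa - 1)
		printf("dev_capa: %s\n",
			rte_eth_dev_capability_name(RTE_BIT64(__builtin_ctzll(capa))));
}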
+
static inline int
eth_dev_check_lro_pkt_size(uint16_t port_id, uint32_t config_size,
uint32_t max_rx_pkt_len, uint32_t dev_info_size)
/*
* Validate offloads that are requested through rte_eth_dev_configure against
- * the offloads successfully set by the ethernet device.
+ * the offloads successfully set by the Ethernet device.
*
* @param port_id
* The port identifier of the Ethernet device.
* @param req_offloads
* The offloads that have been requested through `rte_eth_dev_configure`.
* @param set_offloads
- * The offloads successfully set by the ethernet device.
+ * The offloads successfully set by the Ethernet device.
* @param offload_type
* The offload type i.e. Rx/Tx string.
* @param offload_name
while (offloads_diff != 0) {
/* Check if any offload is requested but not enabled. */
- offload = 1ULL << __builtin_ctzll(offloads_diff);
+ offload = RTE_BIT64(__builtin_ctzll(offloads_diff));
if (offload & req_offloads) {
RTE_ETHDEV_LOG(ERR,
"Port %u failed to enable %s offload %s\n",
return overhead_len;
}
+/* rte_eth_dev_info_get() should be called prior to this function */
+static int
+eth_dev_validate_mtu(uint16_t port_id, struct rte_eth_dev_info *dev_info,
+ uint16_t mtu)
+{
+ uint32_t overhead_len;
+ uint32_t frame_size;
+
+ if (mtu < dev_info->min_mtu) {
+ RTE_ETHDEV_LOG(ERR,
+ "MTU (%u) < device min MTU (%u) for port_id %u\n",
+ mtu, dev_info->min_mtu, port_id);
+ return -EINVAL;
+ }
+ if (mtu > dev_info->max_mtu) {
+ RTE_ETHDEV_LOG(ERR,
+ "MTU (%u) > device max MTU (%u) for port_id %u\n",
+ mtu, dev_info->max_mtu, port_id);
+ return -EINVAL;
+ }
+
+ overhead_len = eth_dev_get_overhead_len(dev_info->max_rx_pktlen,
+ dev_info->max_mtu);
+ frame_size = mtu + overhead_len;
+ if (frame_size < RTE_ETHER_MIN_LEN) {
+ RTE_ETHDEV_LOG(ERR,
+ "Frame size (%u) < min frame size (%u) for port_id %u\n",
+ frame_size, RTE_ETHER_MIN_LEN, port_id);
+ return -EINVAL;
+ }
+
+ if (frame_size > dev_info->max_rx_pktlen) {
+ RTE_ETHDEV_LOG(ERR,
+ "Frame size (%u) > device max frame size (%u) for port_id %u\n",
+ frame_size, dev_info->max_rx_pktlen, port_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
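/*
 * Editorial note, not part of the patch: with standard Ethernet overhead
 * (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN = 14 + 4 = 18 bytes), an MTU of
 * 1500 gives frame_size = 1518, which must lie within
 * [RTE_ETHER_MIN_LEN, dev_info->max_rx_pktlen] for the checks above to pass.
 */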
+
int
rte_eth_dev_configure(uint16_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
struct rte_eth_dev *dev;
struct rte_eth_dev_info dev_info;
struct rte_eth_conf orig_conf;
- uint32_t max_rx_pktlen;
- uint32_t overhead_len;
int diag;
int ret;
uint16_t old_mtu;
if (ret != 0)
goto rollback;
- /* Get the real Ethernet overhead length */
- overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
- dev_info.max_mtu);
-
/* If number of queues specified by application for both Rx and Tx is
* zero, use driver preferred values. This cannot be done individually
* as it is valid for either Tx or Rx (but not both) to be zero.
if (nb_rx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
- "Number of RX queues requested (%u) is greater than max supported(%d)\n",
+ "Number of Rx queues requested (%u) is greater than max supported(%d)\n",
nb_rx_q, RTE_MAX_QUEUES_PER_PORT);
ret = -EINVAL;
goto rollback;
if (nb_tx_q > RTE_MAX_QUEUES_PER_PORT) {
RTE_ETHDEV_LOG(ERR,
- "Number of TX queues requested (%u) is greater than max supported(%d)\n",
+ "Number of Tx queues requested (%u) is greater than max supported(%d)\n",
nb_tx_q, RTE_MAX_QUEUES_PER_PORT);
ret = -EINVAL;
goto rollback;
}
/*
- * Check that the numbers of RX and TX queues are not greater
- * than the maximum number of RX and TX queues supported by the
+ * Check that the numbers of Rx and Tx queues are not greater
+ * than the maximum number of Rx and Tx queues supported by the
* configured device.
*/
if (nb_rx_q > dev_info.max_rx_queues) {
goto rollback;
}
- /*
- * Check that the maximum RX packet length is supported by the
- * configured device.
- */
if (dev_conf->rxmode.mtu == 0)
dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
- max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
- if (max_rx_pktlen > dev_info.max_rx_pktlen) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pktlen %u > max valid value %u\n",
- port_id, max_rx_pktlen, dev_info.max_rx_pktlen);
- ret = -EINVAL;
- goto rollback;
- } else if (max_rx_pktlen < RTE_ETHER_MIN_LEN) {
- RTE_ETHDEV_LOG(ERR,
- "Ethdev port_id=%u max_rx_pktlen %u < min valid value %u\n",
- port_id, max_rx_pktlen, RTE_ETHER_MIN_LEN);
- ret = -EINVAL;
- goto rollback;
- }
- if ((dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) == 0) {
- if (dev->data->dev_conf.rxmode.mtu < RTE_ETHER_MIN_MTU ||
- dev->data->dev_conf.rxmode.mtu > RTE_ETHER_MTU)
- /* Use default value */
- dev->data->dev_conf.rxmode.mtu = RTE_ETHER_MTU;
- }
+ ret = eth_dev_validate_mtu(port_id, &dev_info,
+ dev->data->dev_conf.rxmode.mtu);
+ if (ret != 0)
+ goto rollback;
dev->data->mtu = dev->data->dev_conf.rxmode.mtu;
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
- if (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
+ uint32_t max_rx_pktlen;
+ uint32_t overhead_len;
+
+ overhead_len = eth_dev_get_overhead_len(dev_info.max_rx_pktlen,
+ dev_info.max_mtu);
+ max_rx_pktlen = dev->data->dev_conf.rxmode.mtu + overhead_len;
if (dev_conf->rxmode.max_lro_pkt_size == 0)
dev->data->dev_conf.rxmode.max_lro_pkt_size = max_rx_pktlen;
ret = eth_dev_check_lro_pkt_size(port_id,
}
/* Check if Rx RSS distribution is disabled but RSS hash is enabled. */
- if (((dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) == 0) &&
- (dev_conf->rxmode.offloads & DEV_RX_OFFLOAD_RSS_HASH)) {
+ if (((dev_conf->rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG) == 0) &&
+ (dev_conf->rxmode.offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)) {
RTE_ETHDEV_LOG(ERR,
"Ethdev port_id=%u config invalid Rx mq_mode without RSS but %s offload is requested\n",
port_id,
- rte_eth_dev_rx_offload_name(DEV_RX_OFFLOAD_RSS_HASH));
+ rte_eth_dev_rx_offload_name(RTE_ETH_RX_OFFLOAD_RSS_HASH));
ret = -EINVAL;
goto rollback;
}
/*
- * Setup new number of RX/TX queues and reconfigure device.
+ * Setup new number of Rx/Tx queues and reconfigure device.
*/
diag = eth_dev_rx_queue_config(dev, nb_rx_q);
if (diag != 0) {
return ret;
}
-void
-rte_eth_dev_internal_reset(struct rte_eth_dev *dev)
-{
- if (dev->data->dev_started) {
- RTE_ETHDEV_LOG(ERR, "Port %u must be stopped to allow reset\n",
- dev->data->port_id);
- return;
- }
-
- eth_dev_rx_queue_config(dev, 0);
- eth_dev_tx_queue_config(dev, 0);
-
- memset(&dev->data->dev_conf, 0, sizeof(dev->data->dev_conf));
-}
-
static void
eth_dev_mac_restore(struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
pool_mask = dev->data->mac_pool_sel[i];
do {
- if (pool_mask & 1ULL)
+ if (pool_mask & UINT64_C(1))
(*dev->dev_ops->mac_addr_add)(dev,
addr, i, pool);
pool_mask >>= 1;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
+ if (dev->data->dev_started) {
+ RTE_ETHDEV_LOG(ERR, "Cannot close started device (port %u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_close, -ENOTSUP);
*lasterr = (*dev->dev_ops->dev_close)(dev);
if (*lasterr != 0)
* for each segment specified in extended configuration.
*/
mp_first = rx_seg[0].mp;
- offset_mask = (1u << seg_capa->offset_align_log2) - 1;
+ offset_mask = RTE_BIT32(seg_capa->offset_align_log2) - 1;
for (seg_idx = 0; seg_idx < n_seg; seg_idx++) {
struct rte_mempool *mpl = rx_seg[seg_idx].mp;
uint32_t length = rx_seg[seg_idx].length;
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
return -EINVAL;
}
+ if (local_conf.share_group > 0 &&
+ (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RXQ_SHARE) == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Ethdev port_id=%d rx_queue_id=%d, enabled share_group=%hu while device doesn't support Rx queue share\n",
+ port_id, rx_queue_id, local_conf.share_group);
+ return -EINVAL;
+ }
+
/*
* If LRO is enabled, check that the maximum aggregated packet
* size is supported by the configured device.
*/
/* Get the real Ethernet overhead length */
- if (local_conf.offloads & DEV_RX_OFFLOAD_TCP_LRO) {
+ if (local_conf.offloads & RTE_ETH_RX_OFFLOAD_TCP_LRO) {
uint32_t overhead_len;
uint32_t max_rx_pktlen;
int ret;
dev = &rte_eth_devices[port_id];
if (rx_queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", rx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", rx_queue_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
if (tx_queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", tx_queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", tx_queue_id);
return -EINVAL;
}
rte_eth_link_speed_to_str(uint32_t link_speed)
{
switch (link_speed) {
- case ETH_SPEED_NUM_NONE: return "None";
- case ETH_SPEED_NUM_10M: return "10 Mbps";
- case ETH_SPEED_NUM_100M: return "100 Mbps";
- case ETH_SPEED_NUM_1G: return "1 Gbps";
- case ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
- case ETH_SPEED_NUM_5G: return "5 Gbps";
- case ETH_SPEED_NUM_10G: return "10 Gbps";
- case ETH_SPEED_NUM_20G: return "20 Gbps";
- case ETH_SPEED_NUM_25G: return "25 Gbps";
- case ETH_SPEED_NUM_40G: return "40 Gbps";
- case ETH_SPEED_NUM_50G: return "50 Gbps";
- case ETH_SPEED_NUM_56G: return "56 Gbps";
- case ETH_SPEED_NUM_100G: return "100 Gbps";
- case ETH_SPEED_NUM_200G: return "200 Gbps";
- case ETH_SPEED_NUM_UNKNOWN: return "Unknown";
+ case RTE_ETH_SPEED_NUM_NONE: return "None";
+ case RTE_ETH_SPEED_NUM_10M: return "10 Mbps";
+ case RTE_ETH_SPEED_NUM_100M: return "100 Mbps";
+ case RTE_ETH_SPEED_NUM_1G: return "1 Gbps";
+ case RTE_ETH_SPEED_NUM_2_5G: return "2.5 Gbps";
+ case RTE_ETH_SPEED_NUM_5G: return "5 Gbps";
+ case RTE_ETH_SPEED_NUM_10G: return "10 Gbps";
+ case RTE_ETH_SPEED_NUM_20G: return "20 Gbps";
+ case RTE_ETH_SPEED_NUM_25G: return "25 Gbps";
+ case RTE_ETH_SPEED_NUM_40G: return "40 Gbps";
+ case RTE_ETH_SPEED_NUM_50G: return "50 Gbps";
+ case RTE_ETH_SPEED_NUM_56G: return "56 Gbps";
+ case RTE_ETH_SPEED_NUM_100G: return "100 Gbps";
+ case RTE_ETH_SPEED_NUM_200G: return "200 Gbps";
+ case RTE_ETH_SPEED_NUM_UNKNOWN: return "Unknown";
default: return "Invalid";
}
}
return -EINVAL;
}
- if (eth_link->link_status == ETH_LINK_DOWN)
+ if (eth_link->link_status == RTE_ETH_LINK_DOWN)
return snprintf(str, len, "Link down");
else
return snprintf(str, len, "Link up at %s %s %s",
rte_eth_link_speed_to_str(eth_link->link_speed),
- (eth_link->link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (eth_link->link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"FDX" : "HDX",
- (eth_link->link_autoneg == ETH_LINK_AUTONEG) ?
+ (eth_link->link_autoneg == RTE_ETH_LINK_AUTONEG) ?
"Autoneg" : "Fixed");
}
dev_info->rx_desc_lim = lim;
dev_info->tx_desc_lim = lim;
dev_info->device = dev->device;
- dev_info->min_mtu = RTE_ETHER_MIN_MTU;
+ dev_info->min_mtu = RTE_ETHER_MIN_LEN - RTE_ETHER_HDR_LEN -
+ RTE_ETHER_CRC_LEN;
dev_info->max_mtu = UINT16_MAX;
RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->dev_infos_get, -ENOTSUP);
int ret;
struct rte_eth_dev_info dev_info;
struct rte_eth_dev *dev;
- int is_jumbo_frame_capable = 0;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
if (ret != 0)
return ret;
- if (mtu < dev_info.min_mtu || mtu > dev_info.max_mtu)
- return -EINVAL;
-
- if ((dev_info.rx_offload_capa & DEV_RX_OFFLOAD_JUMBO_FRAME) != 0)
- is_jumbo_frame_capable = 1;
+ ret = eth_dev_validate_mtu(port_id, &dev_info, mtu);
+ if (ret != 0)
+ return ret;
}
- if (mtu > RTE_ETHER_MTU && is_jumbo_frame_capable == 0)
+ if (dev->data->dev_configured == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Port %u must be configured before MTU set\n",
+ port_id);
return -EINVAL;
+ }
ret = (*dev->dev_ops->mtu_set)(dev, mtu);
- if (ret == 0) {
+ if (ret == 0)
dev->data->mtu = mtu;
- /* switch to jumbo mode if needed */
- if (mtu > RTE_ETHER_MTU)
- dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
- }
-
return eth_err(port_id, ret);
}
dev = &rte_eth_devices[port_id];
if (!(dev->data->dev_conf.rxmode.offloads &
- DEV_RX_OFFLOAD_VLAN_FILTER)) {
- RTE_ETHDEV_LOG(ERR, "Port %u: vlan-filtering disabled\n",
+ RTE_ETH_RX_OFFLOAD_VLAN_FILTER)) {
+ RTE_ETHDEV_LOG(ERR, "Port %u: VLAN-filtering disabled\n",
port_id);
return -ENOSYS;
}
vbit = vlan_id % 64;
if (on)
- vfc->ids[vidx] |= UINT64_C(1) << vbit;
+ vfc->ids[vidx] |= RTE_BIT64(vbit);
else
- vfc->ids[vidx] &= ~(UINT64_C(1) << vbit);
+ vfc->ids[vidx] &= ~RTE_BIT64(vbit);
}
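/*
 * Editorial note, not part of the patch: ids[] is a bitmap of 64-bit words,
 * so e.g. vlan_id 100 maps to vidx = 100 / 64 = 1 and vbit = 100 % 64 = 36,
 * and "on" sets bit 36 of ids[1] via RTE_BIT64(36).
 */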
return eth_err(port_id, ret);
dev_offloads = orig_offloads;
/* check which option changed by application */
- cur = !!(offload_mask & ETH_VLAN_STRIP_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP);
+ cur = !!(offload_mask & RTE_ETH_VLAN_STRIP_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_STRIP;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
- mask |= ETH_VLAN_STRIP_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
+ mask |= RTE_ETH_VLAN_STRIP_MASK;
}
- cur = !!(offload_mask & ETH_VLAN_FILTER_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER);
+ cur = !!(offload_mask & RTE_ETH_VLAN_FILTER_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_FILTER;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_FILTER;
- mask |= ETH_VLAN_FILTER_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_FILTER;
+ mask |= RTE_ETH_VLAN_FILTER_MASK;
}
- cur = !!(offload_mask & ETH_VLAN_EXTEND_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND);
+ cur = !!(offload_mask & RTE_ETH_VLAN_EXTEND_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_VLAN_EXTEND;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_VLAN_EXTEND;
- mask |= ETH_VLAN_EXTEND_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_VLAN_EXTEND;
+ mask |= RTE_ETH_VLAN_EXTEND_MASK;
}
- cur = !!(offload_mask & ETH_QINQ_STRIP_OFFLOAD);
- org = !!(dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP);
+ cur = !!(offload_mask & RTE_ETH_QINQ_STRIP_OFFLOAD);
+ org = !!(dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP);
if (cur != org) {
if (cur)
- dev_offloads |= DEV_RX_OFFLOAD_QINQ_STRIP;
+ dev_offloads |= RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
else
- dev_offloads &= ~DEV_RX_OFFLOAD_QINQ_STRIP;
- mask |= ETH_QINQ_STRIP_MASK;
+ dev_offloads &= ~RTE_ETH_RX_OFFLOAD_QINQ_STRIP;
+ mask |= RTE_ETH_QINQ_STRIP_MASK;
}
/*no change*/
dev = &rte_eth_devices[port_id];
dev_offloads = &dev->data->dev_conf.rxmode.offloads;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
- ret |= ETH_VLAN_STRIP_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
+ ret |= RTE_ETH_VLAN_STRIP_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
- ret |= ETH_VLAN_FILTER_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_FILTER)
+ ret |= RTE_ETH_VLAN_FILTER_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_VLAN_EXTEND)
- ret |= ETH_VLAN_EXTEND_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_VLAN_EXTEND)
+ ret |= RTE_ETH_VLAN_EXTEND_OFFLOAD;
- if (*dev_offloads & DEV_RX_OFFLOAD_QINQ_STRIP)
- ret |= ETH_QINQ_STRIP_OFFLOAD;
+ if (*dev_offloads & RTE_ETH_RX_OFFLOAD_QINQ_STRIP)
+ ret |= RTE_ETH_QINQ_STRIP_OFFLOAD;
return ret;
}
return -EINVAL;
}
- if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+ if (pfc_conf->priority > (RTE_ETH_DCB_NUM_USER_PRIORITIES - 1)) {
RTE_ETHDEV_LOG(ERR, "Invalid priority, only 0-7 allowed\n");
return -EINVAL;
}
}
static int
-eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size)
+validate_rx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
- uint16_t i, num;
+ if ((pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) ||
+ (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
+ if (pfc_queue_conf->rx_pause.tx_qid >= dev_info->nb_tx_queues) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC Tx queue not in range for Rx pause requested:%d configured:%d\n",
+ pfc_queue_conf->rx_pause.tx_qid,
+ dev_info->nb_tx_queues);
+ return -EINVAL;
+ }
- num = (reta_size + RTE_RETA_GROUP_SIZE - 1) / RTE_RETA_GROUP_SIZE;
- for (i = 0; i < num; i++) {
- if (reta_conf[i].mask)
- return 0;
+ if (pfc_queue_conf->rx_pause.tc >= tc_max) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC TC not in range for Rx pause requested:%d max:%d\n",
+ pfc_queue_conf->rx_pause.tc, tc_max);
+ return -EINVAL;
+ }
}
- return -EINVAL;
+ return 0;
}
static int
-eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
- uint16_t reta_size,
- uint16_t max_rxq)
+validate_tx_pause_config(struct rte_eth_dev_info *dev_info, uint8_t tc_max,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
{
- uint16_t i, idx, shift;
+ if ((pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) ||
+ (pfc_queue_conf->mode == RTE_ETH_FC_FULL)) {
+ if (pfc_queue_conf->tx_pause.rx_qid >= dev_info->nb_rx_queues) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC Rx queue not in range for Tx pause requested:%d configured:%d\n",
+ pfc_queue_conf->tx_pause.rx_qid,
+ dev_info->nb_rx_queues);
+ return -EINVAL;
+ }
- if (max_rxq == 0) {
- RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
- return -EINVAL;
+ if (pfc_queue_conf->tx_pause.tc >= tc_max) {
+ RTE_ETHDEV_LOG(ERR,
+ "PFC TC not in range for Tx pause requested:%d max:%d\n",
+ pfc_queue_conf->tx_pause.tc, tc_max);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
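/*
 * Editorial note, not part of the patch: the cross-direction checks above
 * are intentional. Honouring a received pause frame stops our transmit
 * side, so rx_pause.tx_qid is checked against nb_tx_queues; generating a
 * pause frame relieves our receive side, so tx_pause.rx_qid is checked
 * against nb_rx_queues.
 */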
+
+int
+rte_eth_dev_priority_flow_ctrl_queue_info_get(uint16_t port_id,
+ struct rte_eth_pfc_queue_info *pfc_queue_info)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (pfc_queue_info == NULL) {
+ RTE_ETHDEV_LOG(ERR, "PFC info param is NULL for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
+ return eth_err(port_id, (*dev->dev_ops->priority_flow_ctrl_queue_info_get)
+ (dev, pfc_queue_info));
+ return -ENOTSUP;
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_queue_configure(uint16_t port_id,
+ struct rte_eth_pfc_queue_conf *pfc_queue_conf)
+{
+ struct rte_eth_pfc_queue_info pfc_info;
+ struct rte_eth_dev_info dev_info;
+ struct rte_eth_dev *dev;
+ int ret;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (pfc_queue_conf == NULL) {
+ RTE_ETHDEV_LOG(ERR, "PFC parameters are NULL for port (%u)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ ret = rte_eth_dev_info_get(port_id, &dev_info);
+ if (ret != 0)
+ return ret;
+
+ ret = rte_eth_dev_priority_flow_ctrl_queue_info_get(port_id, &pfc_info);
+ if (ret != 0)
+ return ret;
+
+ if (pfc_info.tc_max == 0) {
+ RTE_ETHDEV_LOG(ERR, "Ethdev port %u does not support PFC TC values\n",
+ port_id);
+ return -ENOTSUP;
+ }
+
+ /* Check requested mode supported or not */
+ if (pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE &&
+ pfc_queue_conf->mode == RTE_ETH_FC_TX_PAUSE) {
+ RTE_ETHDEV_LOG(ERR, "PFC Tx pause unsupported for port (%d)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE &&
+ pfc_queue_conf->mode == RTE_ETH_FC_RX_PAUSE) {
+ RTE_ETHDEV_LOG(ERR, "PFC Rx pause unsupported for port (%d)\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ /* Validate Rx pause parameters */
+ if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
+ pfc_info.mode_capa == RTE_ETH_FC_RX_PAUSE) {
+ ret = validate_rx_pause_config(&dev_info, pfc_info.tc_max,
+ pfc_queue_conf);
+ if (ret != 0)
+ return ret;
+ }
+
+ /* Validate Tx pause parameters */
+ if (pfc_info.mode_capa == RTE_ETH_FC_FULL ||
+ pfc_info.mode_capa == RTE_ETH_FC_TX_PAUSE) {
+ ret = validate_tx_pause_config(&dev_info, pfc_info.tc_max,
+ pfc_queue_conf);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (*dev->dev_ops->priority_flow_ctrl_queue_config)
+ return eth_err(port_id,
+ (*dev->dev_ops->priority_flow_ctrl_queue_config)(
+ dev, pfc_queue_conf));
+ return -ENOTSUP;
+}
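/*
 * Editorial sketch, not part of the patch: a hypothetical caller pairing
 * Rx queue 0 with Tx queue 0 on traffic class 0 in full PFC mode. The
 * field names follow the pfc_queue_conf accesses validated above; error
 * handling beyond the return code is elided.
 */
static int
example_configure_pfc_queue(uint16_t port_id)
{
	struct rte_eth_pfc_queue_conf conf = {
		.mode = RTE_ETH_FC_FULL,
		.rx_pause = { .tx_qid = 0, .tc = 0 },
		.tx_pause = { .rx_qid = 0, .tc = 0 },
	};

	return rte_eth_dev_priority_flow_ctrl_queue_configure(port_id, &conf);
}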
+
+static int
+eth_check_reta_mask(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size)
+{
+ uint16_t i, num;
+
+ num = (reta_size + RTE_ETH_RETA_GROUP_SIZE - 1) / RTE_ETH_RETA_GROUP_SIZE;
+ for (i = 0; i < num; i++) {
+ if (reta_conf[i].mask)
+ return 0;
+ }
+
+ return -EINVAL;
+}
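/*
 * Editorial note, not part of the patch: the expression above is a ceiling
 * division, e.g. reta_size = 128 with RTE_ETH_RETA_GROUP_SIZE = 64 yields
 * num = 2 rte_eth_rss_reta_entry64 groups to scan for a non-zero mask.
 */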
+
+static int
+eth_check_reta_entry(struct rte_eth_rss_reta_entry64 *reta_conf,
+ uint16_t reta_size,
+ uint16_t max_rxq)
+{
+ uint16_t i, idx, shift;
+
+ if (max_rxq == 0) {
+ RTE_ETHDEV_LOG(ERR, "No receive queue is available\n");
+ return -EINVAL;
}
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
- if ((reta_conf[idx].mask & (1ULL << shift)) &&
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
+ if ((reta_conf[idx].mask & RTE_BIT64(shift)) &&
(reta_conf[idx].reta[shift] >= max_rxq)) {
RTE_ETHDEV_LOG(ERR,
"reta_conf[%u]->reta[%u]: %u exceeds the maximum rxq index: %u\n",
return -EINVAL;
}
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
return -EINVAL;
}
- if (udp_tunnel->prot_type >= RTE_TUNNEL_TYPE_MAX) {
+ if (udp_tunnel->prot_type >= RTE_ETH_TUNNEL_TYPE_MAX) {
RTE_ETHDEV_LOG(ERR, "Invalid tunnel type\n");
return -EINVAL;
}
port_id);
return -EINVAL;
}
- if (pool >= ETH_64_POOLS) {
- RTE_ETHDEV_LOG(ERR, "Pool id must be 0-%d\n", ETH_64_POOLS - 1);
+ if (pool >= RTE_ETH_64_POOLS) {
+ RTE_ETHDEV_LOG(ERR, "Pool ID must be 0-%d\n", RTE_ETH_64_POOLS - 1);
return -EINVAL;
}
pool_mask = dev->data->mac_pool_sel[index];
/* Check if both MAC address and pool is already there, and do nothing */
- if (pool_mask & (1ULL << pool))
+ if (pool_mask & RTE_BIT64(pool))
return 0;
}
rte_ether_addr_copy(addr, &dev->data->mac_addrs[index]);
/* Update pool bitmap in NIC data structure */
- dev->data->mac_pool_sel[index] |= (1ULL << pool);
+ dev->data->mac_pool_sel[index] |= RTE_BIT64(pool);
}
return eth_err(port_id, ret);
if (queue_idx > dev_info.max_tx_queues) {
RTE_ETHDEV_LOG(ERR,
- "Set queue rate limit:port %u: invalid queue id=%u\n",
+ "Set queue rate limit:port %u: invalid queue ID=%u\n",
port_id, queue_idx);
return -EINVAL;
}
return ret;
}
-int
-rte_eth_dev_callback_process(struct rte_eth_dev *dev,
- enum rte_eth_event_type event, void *ret_param)
-{
- struct rte_eth_dev_callback *cb_lst;
- struct rte_eth_dev_callback dev_cb;
- int rc = 0;
-
- rte_spinlock_lock(&eth_dev_cb_lock);
- TAILQ_FOREACH(cb_lst, &(dev->link_intr_cbs), next) {
- if (cb_lst->cb_fn == NULL || cb_lst->event != event)
- continue;
- dev_cb = *cb_lst;
- cb_lst->active = 1;
- if (ret_param != NULL)
- dev_cb.ret_param = ret_param;
-
- rte_spinlock_unlock(&eth_dev_cb_lock);
- rc = dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
- dev_cb.cb_arg, dev_cb.ret_param);
- rte_spinlock_lock(&eth_dev_cb_lock);
- cb_lst->active = 0;
- }
- rte_spinlock_unlock(&eth_dev_cb_lock);
- return rc;
-}
-
-void
-rte_eth_dev_probing_finish(struct rte_eth_dev *dev)
-{
- if (dev == NULL)
- return;
-
- /*
- * for secondary process, at that point we expect device
- * to be already 'usable', so shared data and all function pointers
- * for fast-path devops have to be setup properly inside rte_eth_dev.
- */
- if (rte_eal_process_type() == RTE_PROC_SECONDARY)
- eth_dev_fp_ops_setup(rte_eth_fp_ops + dev->data->port_id, dev);
-
- rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_NEW, NULL);
-
- dev->state = RTE_ETH_DEV_ATTACHED;
-}
-
int
rte_eth_dev_rx_intr_ctl(uint16_t port_id, int epfd, int op, void *data)
{
dev = &rte_eth_devices[port_id];
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
- if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -EPERM;
}
for (qid = 0; qid < dev->data->nb_rx_queues; qid++) {
- vec = intr_handle->intr_vec[qid];
+ vec = rte_intr_vec_list_index_get(intr_handle, qid);
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
RTE_ETHDEV_LOG(ERR,
- "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
port_id, qid, op, epfd, vec);
}
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -1;
}
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -1;
}
intr_handle = dev->intr_handle;
- if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -1;
}
- vec = intr_handle->intr_vec[queue_id];
+ vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
efd_idx = (vec >= RTE_INTR_VEC_RXTX_OFFSET) ?
(vec - RTE_INTR_VEC_RXTX_OFFSET) : vec;
- fd = intr_handle->efds[efd_idx];
+ fd = rte_intr_efds_index_get(intr_handle, efd_idx);
return fd;
}
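/*
 * Editorial note, not part of the patch: queue interrupt vectors start at
 * RTE_INTR_VEC_RXTX_OFFSET (lower vectors are reserved, e.g. for link
 * status), so the efd index above subtracts that offset before the lookup.
 */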
-static inline int
-eth_dev_dma_mzone_name(char *name, size_t len, uint16_t port_id, uint16_t queue_id,
- const char *ring_name)
-{
- return snprintf(name, len, "eth_p%d_q%d_%s",
- port_id, queue_id, ring_name);
-}
-
-const struct rte_memzone *
-rte_eth_dma_zone_reserve(const struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id, size_t size, unsigned align,
- int socket_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
- int rc;
-
- rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
- queue_id, ring_name);
- if (rc >= RTE_MEMZONE_NAMESIZE) {
- RTE_ETHDEV_LOG(ERR, "ring name too long\n");
- rte_errno = ENAMETOOLONG;
- return NULL;
- }
-
- mz = rte_memzone_lookup(z_name);
- if (mz) {
- if ((socket_id != SOCKET_ID_ANY && socket_id != mz->socket_id) ||
- size > mz->len ||
- ((uintptr_t)mz->addr & (align - 1)) != 0) {
- RTE_ETHDEV_LOG(ERR,
- "memzone %s does not justify the requested attributes\n",
- mz->name);
- return NULL;
- }
-
- return mz;
- }
-
- return rte_memzone_reserve_aligned(z_name, size, socket_id,
- RTE_MEMZONE_IOVA_CONTIG, align);
-}
-
-int
-rte_eth_dma_zone_free(const struct rte_eth_dev *dev, const char *ring_name,
- uint16_t queue_id)
-{
- char z_name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
- int rc = 0;
-
- rc = eth_dev_dma_mzone_name(z_name, sizeof(z_name), dev->data->port_id,
- queue_id, ring_name);
- if (rc >= RTE_MEMZONE_NAMESIZE) {
- RTE_ETHDEV_LOG(ERR, "ring name too long\n");
- return -ENAMETOOLONG;
- }
-
- mz = rte_memzone_lookup(z_name);
- if (mz)
- rc = rte_memzone_free(mz);
- else
- rc = -ENOENT;
-
- return rc;
-}
-
-int
-rte_eth_dev_create(struct rte_device *device, const char *name,
- size_t priv_data_size,
- ethdev_bus_specific_init ethdev_bus_specific_init,
- void *bus_init_params,
- ethdev_init_t ethdev_init, void *init_params)
-{
- struct rte_eth_dev *ethdev;
- int retval;
-
- RTE_FUNC_PTR_OR_ERR_RET(*ethdev_init, -EINVAL);
-
- if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
- ethdev = rte_eth_dev_allocate(name);
- if (!ethdev)
- return -ENODEV;
-
- if (priv_data_size) {
- ethdev->data->dev_private = rte_zmalloc_socket(
- name, priv_data_size, RTE_CACHE_LINE_SIZE,
- device->numa_node);
-
- if (!ethdev->data->dev_private) {
- RTE_ETHDEV_LOG(ERR,
- "failed to allocate private data\n");
- retval = -ENOMEM;
- goto probe_failed;
- }
- }
- } else {
- ethdev = rte_eth_dev_attach_secondary(name);
- if (!ethdev) {
- RTE_ETHDEV_LOG(ERR,
- "secondary process attach failed, ethdev doesn't exist\n");
- return -ENODEV;
- }
- }
-
- ethdev->device = device;
-
- if (ethdev_bus_specific_init) {
- retval = ethdev_bus_specific_init(ethdev, bus_init_params);
- if (retval) {
- RTE_ETHDEV_LOG(ERR,
- "ethdev bus specific initialisation failed\n");
- goto probe_failed;
- }
- }
-
- retval = ethdev_init(ethdev, init_params);
- if (retval) {
- RTE_ETHDEV_LOG(ERR, "ethdev initialisation failed\n");
- goto probe_failed;
- }
-
- rte_eth_dev_probing_finish(ethdev);
-
- return retval;
-
-probe_failed:
- rte_eth_dev_release_port(ethdev);
- return retval;
-}
-
-int
-rte_eth_dev_destroy(struct rte_eth_dev *ethdev,
- ethdev_uninit_t ethdev_uninit)
-{
- int ret;
-
- ethdev = rte_eth_dev_allocated(ethdev->data->name);
- if (!ethdev)
- return -ENODEV;
-
- RTE_FUNC_PTR_OR_ERR_RET(*ethdev_uninit, -EINVAL);
-
- ret = ethdev_uninit(ethdev);
- if (ret)
- return ret;
-
- return rte_eth_dev_release_port(ethdev);
-}
-
int
rte_eth_dev_rx_intr_ctl_q(uint16_t port_id, uint16_t queue_id,
int epfd, int op, void *data)
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
}
if (!dev->intr_handle) {
- RTE_ETHDEV_LOG(ERR, "RX Intr handle unset\n");
+ RTE_ETHDEV_LOG(ERR, "Rx Intr handle unset\n");
return -ENOTSUP;
}
intr_handle = dev->intr_handle;
- if (!intr_handle->intr_vec) {
- RTE_ETHDEV_LOG(ERR, "RX Intr vector unset\n");
+ if (rte_intr_vec_list_index_get(intr_handle, 0) < 0) {
+ RTE_ETHDEV_LOG(ERR, "Rx Intr vector unset\n");
return -EPERM;
}
- vec = intr_handle->intr_vec[queue_id];
+ vec = rte_intr_vec_list_index_get(intr_handle, queue_id);
rc = rte_intr_rx_ctl(intr_handle, epfd, op, vec, data);
if (rc && rc != -EEXIST) {
RTE_ETHDEV_LOG(ERR,
- "p %u q %u rx ctl error op %d epfd %d vec %u\n",
+ "p %u q %u Rx ctl error op %d epfd %d vec %u\n",
port_id, queue_id, op, epfd, vec);
return rc;
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_rx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid RX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Rx queue_id=%u\n", queue_id);
return -EINVAL;
}
dev = &rte_eth_devices[port_id];
if (queue_id >= dev->data->nb_tx_queues) {
- RTE_ETHDEV_LOG(ERR, "Invalid TX queue_id=%u\n", queue_id);
+ RTE_ETHDEV_LOG(ERR, "Invalid Tx queue_id=%u\n", queue_id);
return -EINVAL;
}
return eth_err(port_id, (*dev->dev_ops->hairpin_cap_get)(dev, cap));
}
-int
-rte_eth_dev_is_rx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- if (dev->data->rx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
- return 1;
- return 0;
-}
-
-int
-rte_eth_dev_is_tx_hairpin_queue(struct rte_eth_dev *dev, uint16_t queue_id)
-{
- if (dev->data->tx_queue_state[queue_id] == RTE_ETH_QUEUE_STATE_HAIRPIN)
- return 1;
- return 0;
-}
-
int
rte_eth_dev_pool_ops_supported(uint16_t port_id, const char *pool)
{
return (*dev->dev_ops->pool_ops_supported)(dev, pool);
}
-/**
- * A set of values to describe the possible states of a switch domain.
- */
-enum rte_eth_switch_domain_state {
- RTE_ETH_SWITCH_DOMAIN_UNUSED = 0,
- RTE_ETH_SWITCH_DOMAIN_ALLOCATED
-};
-
-/**
- * Array of switch domains available for allocation. Array is sized to
- * RTE_MAX_ETHPORTS elements as there cannot be more active switch domains than
- * ethdev ports in a single process.
- */
-static struct rte_eth_dev_switch {
- enum rte_eth_switch_domain_state state;
-} eth_dev_switch_domains[RTE_MAX_ETHPORTS];
-
-int
-rte_eth_switch_domain_alloc(uint16_t *domain_id)
-{
- uint16_t i;
-
- *domain_id = RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID;
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (eth_dev_switch_domains[i].state ==
- RTE_ETH_SWITCH_DOMAIN_UNUSED) {
- eth_dev_switch_domains[i].state =
- RTE_ETH_SWITCH_DOMAIN_ALLOCATED;
- *domain_id = i;
- return 0;
- }
- }
-
- return -ENOSPC;
-}
-
-int
-rte_eth_switch_domain_free(uint16_t domain_id)
-{
- if (domain_id == RTE_ETH_DEV_SWITCH_DOMAIN_ID_INVALID ||
- domain_id >= RTE_MAX_ETHPORTS)
- return -EINVAL;
-
- if (eth_dev_switch_domains[domain_id].state !=
- RTE_ETH_SWITCH_DOMAIN_ALLOCATED)
- return -EINVAL;
-
- eth_dev_switch_domains[domain_id].state = RTE_ETH_SWITCH_DOMAIN_UNUSED;
-
- return 0;
-}
-
-static int
-eth_dev_devargs_tokenise(struct rte_kvargs *arglist, const char *str_in)
-{
- int state;
- struct rte_kvargs_pair *pair;
- char *letter;
-
- arglist->str = strdup(str_in);
- if (arglist->str == NULL)
- return -ENOMEM;
-
- letter = arglist->str;
- state = 0;
- arglist->count = 0;
- pair = &arglist->pairs[0];
- while (1) {
- switch (state) {
- case 0: /* Initial */
- if (*letter == '=')
- return -EINVAL;
- else if (*letter == '\0')
- return 0;
-
- state = 1;
- pair->key = letter;
- /* fall-thru */
-
- case 1: /* Parsing key */
- if (*letter == '=') {
- *letter = '\0';
- pair->value = letter + 1;
- state = 2;
- } else if (*letter == ',' || *letter == '\0')
- return -EINVAL;
- break;
-
-
- case 2: /* Parsing value */
- if (*letter == '[')
- state = 3;
- else if (*letter == ',') {
- *letter = '\0';
- arglist->count++;
- pair = &arglist->pairs[arglist->count];
- state = 0;
- } else if (*letter == '\0') {
- letter--;
- arglist->count++;
- pair = &arglist->pairs[arglist->count];
- state = 0;
- }
- break;
-
- case 3: /* Parsing list */
- if (*letter == ']')
- state = 2;
- else if (*letter == '\0')
- return -EINVAL;
- break;
- }
- letter++;
- }
-}
-
-int
-rte_eth_devargs_parse(const char *dargs, struct rte_eth_devargs *eth_da)
-{
- struct rte_kvargs args;
- struct rte_kvargs_pair *pair;
- unsigned int i;
- int result = 0;
-
- memset(eth_da, 0, sizeof(*eth_da));
-
- result = eth_dev_devargs_tokenise(&args, dargs);
- if (result < 0)
- goto parse_cleanup;
-
- for (i = 0; i < args.count; i++) {
- pair = &args.pairs[i];
- if (strcmp("representor", pair->key) == 0) {
- if (eth_da->type != RTE_ETH_REPRESENTOR_NONE) {
- RTE_LOG(ERR, EAL, "duplicated representor key: %s\n",
- dargs);
- result = -1;
- goto parse_cleanup;
- }
- result = rte_eth_devargs_parse_representor_ports(
- pair->value, eth_da);
- if (result < 0)
- goto parse_cleanup;
- }
- }
-
-parse_cleanup:
- if (args.str)
- free(args.str);
-
- return result;
-}
-
-int
-rte_eth_representor_id_get(uint16_t port_id,
- enum rte_eth_representor_type type,
- int controller, int pf, int representor_port,
- uint16_t *repr_id)
-{
- int ret, n, count;
- uint32_t i;
- struct rte_eth_representor_info *info = NULL;
- size_t size;
-
- if (type == RTE_ETH_REPRESENTOR_NONE)
- return 0;
- if (repr_id == NULL)
- return -EINVAL;
-
- /* Get PMD representor range info. */
- ret = rte_eth_representor_info_get(port_id, NULL);
- if (ret == -ENOTSUP && type == RTE_ETH_REPRESENTOR_VF &&
- controller == -1 && pf == -1) {
- /* Direct mapping for legacy VF representor. */
- *repr_id = representor_port;
- return 0;
- } else if (ret < 0) {
- return ret;
- }
- n = ret;
- size = sizeof(*info) + n * sizeof(info->ranges[0]);
- info = calloc(1, size);
- if (info == NULL)
- return -ENOMEM;
- info->nb_ranges_alloc = n;
- ret = rte_eth_representor_info_get(port_id, info);
- if (ret < 0)
- goto out;
-
- /* Default controller and pf to caller. */
- if (controller == -1)
- controller = info->controller;
- if (pf == -1)
- pf = info->pf;
-
- /* Locate representor ID. */
- ret = -ENOENT;
- for (i = 0; i < info->nb_ranges; ++i) {
- if (info->ranges[i].type != type)
- continue;
- if (info->ranges[i].controller != controller)
- continue;
- if (info->ranges[i].id_end < info->ranges[i].id_base) {
- RTE_LOG(WARNING, EAL, "Port %hu invalid representor ID Range %u - %u, entry %d\n",
- port_id, info->ranges[i].id_base,
- info->ranges[i].id_end, i);
- continue;
-
- }
- count = info->ranges[i].id_end - info->ranges[i].id_base + 1;
- switch (info->ranges[i].type) {
- case RTE_ETH_REPRESENTOR_PF:
- if (pf < info->ranges[i].pf ||
- pf >= info->ranges[i].pf + count)
- continue;
- *repr_id = info->ranges[i].id_base +
- (pf - info->ranges[i].pf);
- ret = 0;
- goto out;
- case RTE_ETH_REPRESENTOR_VF:
- if (info->ranges[i].pf != pf)
- continue;
- if (representor_port < info->ranges[i].vf ||
- representor_port >= info->ranges[i].vf + count)
- continue;
- *repr_id = info->ranges[i].id_base +
- (representor_port - info->ranges[i].vf);
- ret = 0;
- goto out;
- case RTE_ETH_REPRESENTOR_SF:
- if (info->ranges[i].pf != pf)
- continue;
- if (representor_port < info->ranges[i].sf ||
- representor_port >= info->ranges[i].sf + count)
- continue;
- *repr_id = info->ranges[i].id_base +
- (representor_port - info->ranges[i].sf);
- ret = 0;
- goto out;
- default:
- break;
- }
- }
-out:
- free(info);
- return ret;
-}
-
static int
eth_dev_handle_port_list(const char *cmd __rte_unused,
const char *params __rte_unused,
rte_tel_data_add_dict_string(d, status_str, "UP");
rte_tel_data_add_dict_u64(d, "speed", link.link_speed);
rte_tel_data_add_dict_string(d, "duplex",
- (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ (link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX) ?
"full-duplex" : "half-duplex");
return 0;
}
struct rte_tel_data *d)
{
struct rte_tel_data *rxq_state, *txq_state;
- char mac_addr[RTE_ETHER_ADDR_LEN];
+ char mac_addr[RTE_ETHER_ADDR_FMT_SIZE];
struct rte_eth_dev *eth_dev;
char *end_param;
int port_id, i;
return -EINVAL;
eth_dev = &rte_eth_devices[port_id];
- if (!eth_dev)
- return -EINVAL;
rxq_state = rte_tel_data_alloc();
if (!rxq_state)
return -ENOMEM;
txq_state = rte_tel_data_alloc();
- if (!txq_state)
+ if (!txq_state) {
+ rte_tel_data_free(rxq_state);
return -ENOMEM;
+ }
rte_tel_data_start_dict(d);
rte_tel_data_add_dict_string(d, "name", eth_dev->data->name);
eth_dev->data->min_rx_buf_size);
rte_tel_data_add_dict_int(d, "rx_mbuf_alloc_fail",
eth_dev->data->rx_mbuf_alloc_failed);
- snprintf(mac_addr, RTE_ETHER_ADDR_LEN, "%02x:%02x:%02x:%02x:%02x:%02x",
- eth_dev->data->mac_addrs->addr_bytes[0],
- eth_dev->data->mac_addrs->addr_bytes[1],
- eth_dev->data->mac_addrs->addr_bytes[2],
- eth_dev->data->mac_addrs->addr_bytes[3],
- eth_dev->data->mac_addrs->addr_bytes[4],
- eth_dev->data->mac_addrs->addr_bytes[5]);
+ rte_ether_format_addr(mac_addr, sizeof(mac_addr),
+ eth_dev->data->mac_addrs);
rte_tel_data_add_dict_string(d, "mac_addr", mac_addr);
rte_tel_data_add_dict_int(d, "promiscuous",
eth_dev->data->promiscuous);
}
int
-rte_eth_hairpin_queue_peer_update(uint16_t peer_port, uint16_t peer_queue,
- struct rte_hairpin_peer_info *cur_info,
- struct rte_hairpin_peer_info *peer_info,
- uint32_t direction)
+rte_eth_representor_info_get(uint16_t port_id,
+ struct rte_eth_representor_info *info)
{
struct rte_eth_dev *dev;
- /* Current queue information is not mandatory. */
- if (peer_info == NULL)
- return -EINVAL;
-
- /* No need to check the validity again. */
- dev = &rte_eth_devices[peer_port];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_update,
- -ENOTSUP);
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
- return (*dev->dev_ops->hairpin_queue_peer_update)(dev, peer_queue,
- cur_info, peer_info, direction);
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
+ return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
}
int
-rte_eth_hairpin_queue_peer_bind(uint16_t cur_port, uint16_t cur_queue,
- struct rte_hairpin_peer_info *peer_info,
- uint32_t direction)
+rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
{
struct rte_eth_dev *dev;
- if (peer_info == NULL)
- return -EINVAL;
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
- /* No need to check the validity again. */
- dev = &rte_eth_devices[cur_port];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_bind,
- -ENOTSUP);
+ if (dev->data->dev_configured != 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "The port (ID=%"PRIu16") is already configured\n",
+ port_id);
+ return -EBUSY;
+ }
- return (*dev->dev_ops->hairpin_queue_peer_bind)(dev, cur_queue,
- peer_info, direction);
+ if (features == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
+ return eth_err(port_id,
+ (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
}
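/*
 * Editorial sketch, not part of the patch: negotiation has to happen before
 * rte_eth_dev_configure(), as the dev_configured check above enforces. The
 * PMD may clear bits it cannot deliver, so the caller inspects the mask on
 * return. Flag names are taken from rte_ethdev.h; the helper is hypothetical.
 */
static uint64_t
example_negotiate_rx_metadata(uint16_t port_id)
{
	uint64_t features = RTE_ETH_RX_METADATA_USER_FLAG |
			    RTE_ETH_RX_METADATA_USER_MARK;

	if (rte_eth_rx_metadata_negotiate(port_id, &features) != 0)
		return 0;
	return features; /* the subset the PMD actually agreed to */
}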
int
-rte_eth_hairpin_queue_peer_unbind(uint16_t cur_port, uint16_t cur_queue,
- uint32_t direction)
+rte_eth_ip_reassembly_capability_get(uint16_t port_id,
+ struct rte_eth_ip_reassembly_params *reassembly_capa)
{
struct rte_eth_dev *dev;
- /* No need to check the validity again. */
- dev = &rte_eth_devices[cur_port];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->hairpin_queue_peer_unbind,
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (dev->data->dev_configured == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Device with port_id=%u is not configured.\n"
+ "Cannot get IP reassembly capability\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (reassembly_capa == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get reassembly capability to NULL");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_capability_get,
-ENOTSUP);
+ memset(reassembly_capa, 0, sizeof(struct rte_eth_ip_reassembly_params));
- return (*dev->dev_ops->hairpin_queue_peer_unbind)(dev, cur_queue,
- direction);
+ return eth_err(port_id, (*dev->dev_ops->ip_reassembly_capability_get)
+ (dev, reassembly_capa));
}
int
-rte_eth_representor_info_get(uint16_t port_id,
- struct rte_eth_representor_info *info)
+rte_eth_ip_reassembly_conf_get(uint16_t port_id,
+ struct rte_eth_ip_reassembly_params *conf)
{
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->representor_info_get, -ENOTSUP);
- return eth_err(port_id, (*dev->dev_ops->representor_info_get)(dev, info));
+ if (dev->data->dev_configured == 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Device with port_id=%u is not configured.\n"
+ "Cannot get IP reassembly configuration\n",
+ port_id);
+ return -EINVAL;
+ }
+
+ if (conf == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Cannot get reassembly info to NULL");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_get,
+ -ENOTSUP);
+ memset(conf, 0, sizeof(struct rte_eth_ip_reassembly_params));
+ return eth_err(port_id,
+ (*dev->dev_ops->ip_reassembly_conf_get)(dev, conf));
}
int
-rte_eth_rx_metadata_negotiate(uint16_t port_id, uint64_t *features)
+rte_eth_ip_reassembly_conf_set(uint16_t port_id,
+ const struct rte_eth_ip_reassembly_params *conf)
{
struct rte_eth_dev *dev;
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
dev = &rte_eth_devices[port_id];
- if (dev->data->dev_configured != 0) {
+ if (dev->data->dev_configured == 0) {
RTE_ETHDEV_LOG(ERR,
- "The port (id=%"PRIu16") is already configured\n",
+ "Device with port_id=%u is not configured.\n"
+ "Cannot set IP reassembly configuration",
port_id);
- return -EBUSY;
+ return -EINVAL;
}
- if (features == NULL) {
- RTE_ETHDEV_LOG(ERR, "Invalid features (NULL)\n");
+ if (dev->data->dev_started != 0) {
+ RTE_ETHDEV_LOG(ERR,
+ "Device with port_id=%u started,\n"
+ "cannot configure IP reassembly params.\n",
+ port_id);
return -EINVAL;
}
- RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_metadata_negotiate, -ENOTSUP);
+ if (conf == NULL) {
+ RTE_ETHDEV_LOG(ERR,
+ "Invalid IP reassembly configuration (NULL)\n");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->ip_reassembly_conf_set,
+ -ENOTSUP);
return eth_err(port_id,
- (*dev->dev_ops->rx_metadata_negotiate)(dev, features));
+ (*dev->dev_ops->ip_reassembly_conf_set)(dev, conf));
+}
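/*
 * Editorial sketch, not part of the patch: the intended sequence is
 * capability_get -> adjust -> conf_set, after configure but before start,
 * matching the state checks above. The flags field and the
 * RTE_ETH_DEV_REASSEMBLY_F_IPV4 flag name are taken from the header as of
 * this change and are illustrative here.
 */
static int
example_enable_ipv4_reassembly(uint16_t port_id)
{
	struct rte_eth_ip_reassembly_params conf;
	int ret;

	ret = rte_eth_ip_reassembly_capability_get(port_id, &conf);
	if (ret != 0)
		return ret;
	/* Keep the device maxima, but request IPv4 reassembly only. */
	conf.flags &= RTE_ETH_DEV_REASSEMBLY_F_IPV4;
	return rte_eth_ip_reassembly_conf_set(port_id, &conf);
}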
+
+int
+rte_eth_dev_priv_dump(uint16_t port_id, FILE *file)
+{
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+ dev = &rte_eth_devices[port_id];
+
+ if (file == NULL) {
+ RTE_ETHDEV_LOG(ERR, "Invalid file (NULL)\n");
+ return -EINVAL;
+ }
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->eth_dev_priv_dump, -ENOTSUP);
+ return eth_err(port_id, (*dev->dev_ops->eth_dev_priv_dump)(dev, file));
}
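/*
 * Editorial note, not part of the patch: a debug hook can stream the
 * PMD-private state with rte_eth_dev_priv_dump(port_id, stdout).
 */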
RTE_LOG_REGISTER_DEFAULT(rte_eth_dev_logtype, INFO);