/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2012 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
rte_eth_dev_cb_fn cb_fn; /**< Callback address */
void *cb_arg; /**< Parameter for callback */
enum rte_eth_event_type event; /**< Interrupt event type */
+ uint32_t active; /**< Callback is executing */
};
enum {
rte_eal_pci_register(ð_drv->pci_drv);
}
+int
+rte_eth_dev_socket_id(uint8_t port_id)
+{
+ if (port_id >= nb_ports)
+ return -1;
+ return rte_eth_devices[port_id].pci_dev->numa_node;
+}
+
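/*
 * Illustrative usage sketch (not part of the patch): allocate a port's
 * mbuf pool on the NUMA socket its device is attached to. The pool name
 * and sizes below are hypothetical.
 */
#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

static struct rte_mempool *
port_pktmbuf_pool(uint8_t port_id)
{
	int socket = rte_eth_dev_socket_id(port_id);

	if (socket < 0)		/* invalid port: fall back to any socket */
		socket = SOCKET_ID_ANY;
	return rte_mempool_create("port_pool", 8192,
			2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
			32, sizeof(struct rte_pktmbuf_pool_private),
			rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
			socket, 0);
}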
uint8_t
rte_eth_dev_count(void)
{
}
dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
+ return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+}
+
+int
+rte_eth_dev_priority_flow_ctrl_set(uint8_t port_id, struct rte_eth_pfc_conf *pfc_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if (pfc_conf->priority > (ETH_DCB_NUM_USER_PRIORITIES - 1)) {
+ PMD_DEBUG_TRACE("Invalid priority, only 0-7 allowed\n");
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
/* High water, low water validation are device specific */
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->flow_ctrl_set, -ENOTSUP);
- if (*dev->dev_ops->flow_ctrl_set)
- return (*dev->dev_ops->flow_ctrl_set)(dev, fc_conf);
+ if (*dev->dev_ops->priority_flow_ctrl_set)
+ return (*dev->dev_ops->priority_flow_ctrl_set)(dev, pfc_conf);
+ return (-ENOTSUP);
+}
+
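/*
 * Illustrative usage sketch: enable priority flow control for user
 * priority 3. The watermark and pause-time values are hypothetical and
 * device specific.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
enable_pfc_prio3(uint8_t port_id)
{
	struct rte_eth_pfc_conf pfc_conf;

	memset(&pfc_conf, 0, sizeof(pfc_conf));
	pfc_conf.fc.mode = RTE_FC_FULL;
	pfc_conf.fc.high_water = 2048;	/* hypothetical watermark */
	pfc_conf.fc.low_water = 1024;	/* hypothetical watermark */
	pfc_conf.fc.pause_time = 0x680;
	pfc_conf.priority = 3;		/* must be in 0-7 */
	return rte_eth_dev_priority_flow_ctrl_set(port_id, &pfc_conf);
}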
+int
+rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
+{
+ struct rte_eth_dev *dev;
+ uint8_t i, j;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ /* Reject an update mask that selects no entries */
+ if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
+ PMD_DEBUG_TRACE("Invalid update mask bits for port=%d\n", port_id);
+ return (-EINVAL);
+ }
+
+ if (reta_conf->mask_lo != 0) {
+ for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
+ if ((reta_conf->mask_lo & (1ULL << i)) &&
+ (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ PMD_DEBUG_TRACE("RETA hash index output"
+ "configration for port=%d,invalid"
+ "queue=%d\n",port_id,reta_conf->reta[i]);
+
+ return (-EINVAL);
+ }
+ }
+ }
+
+ if (reta_conf->mask_hi != 0) {
+ for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
+ j = (uint8_t)(i + ETH_RSS_RETA_NUM_ENTRIES/2);
+
+ /* Check that each masked entry is below ETH_RSS_RETA_MAX_QUEUE */
+ if ((reta_conf->mask_hi & (1ULL << i)) &&
+ (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ PMD_DEBUG_TRACE("RETA hash index output"
+ "configration for port=%d,invalid"
+ "queue=%d\n",port_id,reta_conf->reta[j]);
+
+ return (-EINVAL);
+ }
+ }
+ }
+
+ dev = &rte_eth_devices[port_id];
- return -ENOTSUP;
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
+ return (*dev->dev_ops->reta_update)(dev, reta_conf);
+}
+
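/*
 * Illustrative usage sketch: spread all ETH_RSS_RETA_NUM_ENTRIES (128)
 * redirection entries across the first four RX queues, updating both
 * 64-entry halves through mask_lo/mask_hi.
 */
#include <string.h>
#include <rte_ethdev.h>

static int
setup_reta(uint8_t port_id)
{
	struct rte_eth_rss_reta reta_conf;
	uint16_t i;

	memset(&reta_conf, 0, sizeof(reta_conf));
	reta_conf.mask_lo = ~0ULL;	/* update entries 0..63 */
	reta_conf.mask_hi = ~0ULL;	/* update entries 64..127 */
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		reta_conf.reta[i] = (uint8_t)(i % 4);
	return rte_eth_dev_rss_reta_update(port_id, &reta_conf);
}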
+int
+rte_eth_dev_rss_reta_query(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+
+ if ((reta_conf->mask_lo == 0) && (reta_conf->mask_hi == 0)) {
+ PMD_DEBUG_TRACE("Invalid mask bits for port=%d\n", port_id);
+ return (-EINVAL);
+ }
+
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_query, -ENOTSUP);
+ return (*dev->dev_ops->reta_query)(dev, reta_conf);
}
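/*
 * Illustrative usage sketch: read the whole table back. The driver fills
 * in the entries whose mask bit is set before returning.
 */
#include <stdio.h>
#include <string.h>
#include <rte_ethdev.h>

static void
dump_reta(uint8_t port_id)
{
	struct rte_eth_rss_reta reta_conf;
	uint16_t i;

	memset(&reta_conf, 0, sizeof(reta_conf));
	reta_conf.mask_lo = reta_conf.mask_hi = ~0ULL;
	if (rte_eth_dev_rss_reta_query(port_id, &reta_conf) != 0)
		return;
	for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES; i++)
		printf("reta[%u] -> queue %u\n", i, reta_conf.reta[i]);
}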
int
return (*dev->tx_pkt_burst)(dev->data->tx_queues[queue_id],
tx_pkts, nb_pkts);
}
+
+uint32_t
+rte_eth_rx_queue_count(uint8_t port_id, uint16_t queue_id)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return 0;
+ }
+ dev = &rte_eth_devices[port_id];
+ /* the count is unsigned: report 0 when the driver lacks support */
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_count, 0);
+ return (*dev->dev_ops->rx_queue_count)(dev, queue_id);
+}
#endif
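/*
 * Illustrative usage sketch: use the RX descriptor backlog to pick a
 * burst size. The 32/64 thresholds are hypothetical.
 */
#include <rte_ethdev.h>

static uint16_t
pick_rx_burst(uint8_t port_id, uint16_t queue_id)
{
	uint32_t backlog = rte_eth_rx_queue_count(port_id, queue_id);

	return (backlog > 32) ? 64 : 32;
}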
int
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
- int ret = -1;
struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *user_cb = NULL;
+ struct rte_eth_dev_callback *user_cb;
if (!cb_fn)
- return -1;
+ return (-EINVAL);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
+ return (-EINVAL);
}
+
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
+
TAILQ_FOREACH(user_cb, &(dev->callbacks), next) {
if (user_cb->cb_fn == cb_fn &&
user_cb->cb_arg == cb_arg &&
user_cb->event == event) {
- ret = 0;
- goto out;
+ break;
}
}
- user_cb = rte_malloc("INTR_USER_CALLBACK",
- sizeof(struct rte_eth_dev_callback), 0);
- if (!user_cb)
- goto out;
- user_cb->cb_fn = cb_fn;
- user_cb->cb_arg = cb_arg;
- user_cb->event = event;
- TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
- ret = 0;
-out:
- rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ /* no matching callback found: create a new one. */
+ if (user_cb == NULL && (user_cb = rte_zmalloc("INTR_USER_CALLBACK",
+ sizeof(struct rte_eth_dev_callback), 0)) != NULL) {
+ user_cb->cb_fn = cb_fn;
+ user_cb->cb_arg = cb_arg;
+ user_cb->event = event;
+ TAILQ_INSERT_TAIL(&(dev->callbacks), user_cb, next);
+ }
- return ret;
+ rte_spinlock_unlock(&rte_eth_dev_cb_lock);
+ return ((user_cb == NULL) ? -ENOMEM : 0);
}
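/*
 * Illustrative usage sketch: register a handler for link-status-change
 * interrupts. The handler body is hypothetical; failures now surface as
 * -EINVAL or -ENOMEM rather than a bare -1.
 */
#include <stdio.h>
#include <rte_ethdev.h>

static void
lsc_handler(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	(void)cb_arg;
	printf("port %u: link status changed (event %d)\n", port_id, event);
}

static int
watch_link(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_INTR_LSC, lsc_handler, NULL);
}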
int
enum rte_eth_event_type event,
rte_eth_dev_cb_fn cb_fn, void *cb_arg)
{
- int ret = -1;
+ int ret;
struct rte_eth_dev *dev;
- struct rte_eth_dev_callback *cb_lst = NULL;
+ struct rte_eth_dev_callback *cb, *next;
if (!cb_fn)
- return -1;
+ return (-EINVAL);
if (port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
- return -1;
+ return (-EINVAL);
}
+
dev = &rte_eth_devices[port_id];
rte_spinlock_lock(&rte_eth_dev_cb_lock);
- TAILQ_FOREACH(cb_lst, &(dev->callbacks), next) {
- if (cb_lst->cb_fn != cb_fn || cb_lst->event != event)
+
+ ret = 0;
+ for (cb = TAILQ_FIRST(&dev->callbacks); cb != NULL; cb = next) {
+
+ next = TAILQ_NEXT(cb, next);
+
+ if (cb->cb_fn != cb_fn || cb->event != event ||
+ (cb->cb_arg != (void *)-1 &&
+ cb->cb_arg != cb_arg))
continue;
- if (cb_lst->cb_arg == (void *)-1 ||
- cb_lst->cb_arg == cb_arg) {
- TAILQ_REMOVE(&(dev->callbacks), cb_lst, next);
- rte_free(cb_lst);
- ret = 0;
+
+ /*
+ * If this callback is not executing right now, remove it;
+ * otherwise leave it in place and report -EAGAIN.
+ */
+ if (cb->active == 0) {
+ TAILQ_REMOVE(&(dev->callbacks), cb, next);
+ rte_free(cb);
+ } else {
+ ret = -EAGAIN;
}
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
-
- return ret;
+ return (ret);
}
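/*
 * Illustrative usage sketch: with the new 'active' flag, unregister
 * returns -EAGAIN while the callback is executing, so callers can retry
 * until the removal succeeds (reuses lsc_handler from the sketch above).
 */
#include <rte_cycles.h>

static void
unwatch_link(uint8_t port_id)
{
	while (rte_eth_dev_callback_unregister(port_id,
			RTE_ETH_EVENT_INTR_LSC, lsc_handler, NULL) == -EAGAIN)
		rte_delay_ms(1);
}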
void
-_rte_eth_dev_callback_process(struct rte_eth_dev *dev, enum rte_eth_event_type event)
+_rte_eth_dev_callback_process(struct rte_eth_dev *dev,
+ enum rte_eth_event_type event)
{
- struct rte_eth_dev_callback *cb_lst = NULL;
+ struct rte_eth_dev_callback *cb_lst;
struct rte_eth_dev_callback dev_cb;
rte_spinlock_lock(&rte_eth_dev_cb_lock);
if (cb_lst->cb_fn == NULL || cb_lst->event != event)
continue;
dev_cb = *cb_lst;
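+ /*
+ * Mark the callback in-flight: unregister must not free this
+ * entry while it runs with the lock released below.
+ */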
+ cb_lst->active = 1;
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
dev_cb.cb_fn(dev->data->port_id, dev_cb.event,
dev_cb.cb_arg);
rte_spinlock_lock(&rte_eth_dev_cb_lock);
+ cb_lst->active = 0;
}
rte_spinlock_unlock(&rte_eth_dev_cb_lock);
}
-