/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
void **rxq;
unsigned i;
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
-
- if (dev->data->rx_queues == NULL) {
+ if (dev->data->rx_queues == NULL) { /* first time configuration */
dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
sizeof(dev->data->rx_queues[0]) * nb_queues,
CACHE_LINE_SIZE);
if (dev->data->rx_queues == NULL) {
dev->data->nb_rx_queues = 0;
return -(ENOMEM);
}
- } else {
+ } else { /* re-configure */
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
rxq = dev->data->rx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
void **txq;
unsigned i;
- FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
-
- if (dev->data->tx_queues == NULL) {
+ if (dev->data->tx_queues == NULL) { /* first time configuration */
dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
sizeof(dev->data->tx_queues[0]) * nb_queues,
CACHE_LINE_SIZE);
if (dev->data->tx_queues == NULL) {
dev->data->nb_tx_queues = 0;
return -(ENOMEM);
}
- } else {
+ } else { /* re-configure */
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
txq = dev->data->tx_queues;
for (i = nb_queues; i < old_nb_queues; i++)
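Moving the FUNC_PTR_OR_ERR_RET check into the re-configure branch means a PMD that provides no rx/tx_queue_release hook can still be configured the first time; the hook is only required when an existing queue array is reshaped. A minimal caller sketch, assuming port_id names a valid, stopped port (the queue counts are illustrative, not from this patch):

#include <string.h>
#include <rte_ethdev.h>

static int
reconfigure_queues(uint8_t port_id)
{
	struct rte_eth_conf conf;

	memset(&conf, 0, sizeof(conf));
	/* First call: the rx/tx queue arrays are zmalloc'ed. */
	if (rte_eth_dev_configure(port_id, 4, 4, &conf) < 0)
		return -1;
	/* Second call: the re-configure path above releases queues 2..3
	 * through dev_ops->rx_queue_release / tx_queue_release. */
	return rte_eth_dev_configure(port_id, 2, 2, &conf);
}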
(dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB_RSS) ||
(dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB)) {
/* SRIOV only works in VMDq enabled mode */
- PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
- "wrong VMDQ mq_mode rx %d tx %d\n",
- port_id, dev_conf->rxmode.mq_mode,
+ PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+ "wrong VMDQ mq_mode rx %u tx %u\n",
+ port_id,
+ dev_conf->rxmode.mq_mode,
dev_conf->txmode.mq_mode);
return (-EINVAL);
}
case ETH_MQ_RX_VMDQ_DCB:
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode is not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
- "unsupported VMDQ mq_mode rx %d\n",
+ PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+ "unsupported VMDQ mq_mode rx %u\n",
port_id, dev_conf->rxmode.mq_mode);
return (-EINVAL);
default: /* ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE */
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
/* DCB VMDQ in SRIOV mode is not implemented yet */
- PMD_DEBUG_TRACE("ethdev port_id=%d SRIOV active, "
- "unsupported VMDQ mq_mode tx %d\n",
+ PMD_DEBUG_TRACE("ethdev port_id=%hhu SRIOV active, "
+ "unsupported VMDQ mq_mode tx %u\n",
port_id, dev_conf->txmode.mq_mode);
return (-EINVAL);
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
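Taken together, the two switches only admit ETH_MQ_RX_VMDQ_ONLY or ETH_MQ_RX_NONE on the Rx side and ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE on the Tx side while SR-IOV is active; every DCB/RSS variant returns -EINVAL. A sketch of a configuration that passes this validation (the zeroed struct and mode choices are illustrative):

#include <string.h>
#include <rte_ethdev.h>

static void
sriov_safe_conf(struct rte_eth_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->rxmode.mq_mode = ETH_MQ_RX_NONE; /* accepted under SR-IOV */
	conf->txmode.mq_mode = ETH_MQ_TX_NONE; /* DCB/RSS modes would be rejected */
}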
(int) sizeof(struct rte_pktmbuf_pool_private));
return (-ENOSPC);
}
- mbp_priv = (struct rte_pktmbuf_pool_private *)
- ((char *)mp + sizeof(struct rte_mempool));
+ mbp_priv = rte_mempool_get_priv(mp);
if ((uint32_t) (mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM) <
dev_info.min_rx_bufsize) {
PMD_DEBUG_TRACE("%s mbuf_data_room_size %d < %d "
}
dev = &rte_eth_devices[port_id];
+ /* Default device offload capabilities and if_index to zero */
+ dev_info->rx_offload_capa = 0;
+ dev_info->tx_offload_capa = 0;
+ dev_info->if_index = 0;
FUNC_PTR_OR_RET(*dev->dev_ops->dev_infos_get);
(*dev->dev_ops->dev_infos_get)(dev, dev_info);
dev_info->pci_dev = dev->pci_dev;
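Because rx_offload_capa, tx_offload_capa and if_index are now cleared before the PMD callback runs, callers no longer read indeterminate values from PMDs that never set those fields. A usage sketch; testing DEV_RX_OFFLOAD_IPV4_CKSUM is one plausible consumer of the capability mask:

#include <stdio.h>
#include <rte_ethdev.h>

static void
show_rx_cksum_capa(uint8_t port_id)
{
	struct rte_eth_dev_info info; /* deliberately not memset by the caller */

	rte_eth_dev_info_get(port_id, &info);
	if (info.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
		printf("port %u: Rx IPv4 checksum offload supported\n", port_id);
}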
rte_eth_dev_rss_reta_update(uint8_t port_id, struct rte_eth_rss_reta *reta_conf)
{
struct rte_eth_dev *dev;
+ uint16_t max_rxq;
uint8_t i, j;
if (port_id >= nb_ports) {
return (-EINVAL);
}
+ dev = &rte_eth_devices[port_id];
+ max_rxq = (dev->data->nb_rx_queues <= ETH_RSS_RETA_MAX_QUEUE) ?
+ dev->data->nb_rx_queues : ETH_RSS_RETA_MAX_QUEUE;
if (reta_conf->mask_lo != 0) {
for (i = 0; i < ETH_RSS_RETA_NUM_ENTRIES/2; i++) {
if ((reta_conf->mask_lo & (1ULL << i)) &&
- (reta_conf->reta[i] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ (reta_conf->reta[i] >= max_rxq)) {
PMD_DEBUG_TRACE("RETA hash index output"
"configration for port=%d,invalid"
"queue=%d\n",port_id,reta_conf->reta[i]);
/* Check the upper half of the table (entries 64..127) */
if ((reta_conf->mask_hi & (1ULL << i)) &&
- (reta_conf->reta[j] >= ETH_RSS_RETA_MAX_QUEUE)) {
+ (reta_conf->reta[j] >= max_rxq)) {
PMD_DEBUG_TRACE("RETA hash index output"
"configration for port=%d,invalid"
"queue=%d\n",port_id,reta_conf->reta[j]);
}
}
- dev = &rte_eth_devices[port_id];
-
FUNC_PTR_OR_ERR_RET(*dev->dev_ops->reta_update, -ENOTSUP);
return (*dev->dev_ops->reta_update)(dev, reta_conf);
}
return (*dev->dev_ops->reta_query)(dev, reta_conf);
}
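With the bound lowered from the compile-time ETH_RSS_RETA_MAX_QUEUE to the number of Rx queues actually configured, every entry written through the masks must now name a live queue. A hedged usage sketch (the 4-queue spread is illustrative):

#include <string.h>
#include <rte_ethdev.h>

static int
spread_reta(uint8_t port_id)
{
	struct rte_eth_rss_reta reta;
	unsigned k;

	memset(&reta, 0, sizeof(reta));
	reta.mask_lo = ~0ULL; /* update entries 0..63 */
	reta.mask_hi = ~0ULL; /* update entries 64..127 */
	for (k = 0; k < ETH_RSS_RETA_NUM_ENTRIES; k++)
		reta.reta[k] = (uint8_t)(k % 4); /* must stay below nb_rx_queues */
	return rte_eth_dev_rss_reta_update(port_id, &reta);
}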
+int
+rte_eth_dev_rss_hash_update(uint8_t port_id, struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+ uint16_t rss_hash_protos;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ rss_hash_protos = rss_conf->rss_hf;
+ if ((rss_hash_protos != 0) &&
+ ((rss_hash_protos & ETH_RSS_PROTO_MASK) == 0)) {
+ PMD_DEBUG_TRACE("Invalid rss_hash_protos=0x%x\n",
+ rss_hash_protos);
+ return (-EINVAL);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_update, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_update)(dev, rss_conf);
+}
+
+int
+rte_eth_dev_rss_hash_conf_get(uint8_t port_id,
+ struct rte_eth_rss_conf *rss_conf)
+{
+ struct rte_eth_dev *dev;
+
+ if (port_id >= nb_ports) {
+ PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
+ return (-ENODEV);
+ }
+ dev = &rte_eth_devices[port_id];
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rss_hash_conf_get, -ENOTSUP);
+ return (*dev->dev_ops->rss_hash_conf_get)(dev, rss_conf);
+}
+
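The new update/get pair validates only the port index and the protocol mask in this generic layer and leaves everything else to the PMD. A round-trip sketch, assuming (as the patch suggests) that rss_key == NULL updates only the hash protocols and that a non-NULL rss_key on the get side receives a copy of the active key; the 40-byte buffer is the conventional RSS key length, not something this file enforces:

#include <stdio.h>
#include <rte_ethdev.h>

static void
rss_round_trip(uint8_t port_id)
{
	struct rte_eth_rss_conf rss_conf;
	uint8_t key[40];

	rss_conf.rss_key = NULL; /* keep the current key */
	rss_conf.rss_hf = ETH_RSS_IPV4 | ETH_RSS_IPV6; /* must intersect ETH_RSS_PROTO_MASK */
	if (rte_eth_dev_rss_hash_update(port_id, &rss_conf) != 0)
		return; /* -ENODEV, -EINVAL or -ENOTSUP per the checks above */

	rss_conf.rss_key = key; /* PMD copies the active key back */
	if (rte_eth_dev_rss_hash_conf_get(port_id, &rss_conf) == 0)
		printf("active rss_hf=0x%x\n", (unsigned)rss_conf.rss_hf);
}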
int
rte_eth_led_on(uint8_t port_id)
{