return (nb_ports);
}
+static int
+rte_eth_dev_rx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_rx_queues;
+ void **rxq;
+ unsigned i;
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_release, -ENOTSUP);
+
+ if (dev->data->rx_queues == NULL) {
+ dev->data->rx_queues = rte_zmalloc("ethdev->rx_queues",
+ sizeof(dev->data->rx_queues[0]) * nb_queues,
+ CACHE_LINE_SIZE);
+ if (dev->data->rx_queues == NULL) {
+ dev->data->nb_rx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else {
+ rxq = dev->data->rx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->rx_queue_release)(rxq[i]);
+ rxq = rte_realloc(rxq, sizeof(rxq[0]) * nb_queues,
+ CACHE_LINE_SIZE);
+ if (rxq == NULL)
+ return -(ENOMEM);
+
+ if (nb_queues > old_nb_queues)
+ memset(rxq + old_nb_queues, 0,
+ sizeof(rxq[0]) * (nb_queues - old_nb_queues));
+
+ dev->data->rx_queues = rxq;
+
+ }
+ dev->data->nb_rx_queues = nb_queues;
+ return (0);
+}
+
+static int
+rte_eth_dev_tx_queue_config(struct rte_eth_dev *dev, uint16_t nb_queues)
+{
+ uint16_t old_nb_queues = dev->data->nb_tx_queues;
+ void **txq;
+ unsigned i;
+
+ FUNC_PTR_OR_ERR_RET(*dev->dev_ops->tx_queue_release, -ENOTSUP);
+
+ if (dev->data->tx_queues == NULL) {
+ dev->data->tx_queues = rte_zmalloc("ethdev->tx_queues",
+ sizeof(dev->data->tx_queues[0]) * nb_queues,
+ CACHE_LINE_SIZE);
+ if (dev->data->tx_queues == NULL) {
+ dev->data->nb_tx_queues = 0;
+ return -(ENOMEM);
+ }
+ } else {
+ txq = dev->data->tx_queues;
+
+ for (i = nb_queues; i < old_nb_queues; i++)
+ (*dev->dev_ops->tx_queue_release)(txq[i]);
+ txq = rte_realloc(txq, sizeof(txq[0]) * nb_queues,
+ CACHE_LINE_SIZE);
+ if (txq == NULL)
+ return -(ENOMEM);
+
+ if (nb_queues > old_nb_queues)
+ memset(txq + old_nb_queues, 0,
+ sizeof(txq[0]) * (nb_queues - old_nb_queues));
+
+ dev->data->tx_queues = txq;
+
+ }
+ dev->data->nb_tx_queues = nb_queues;
+ return (0);
+}
+
int
rte_eth_dev_configure(uint8_t port_id, uint16_t nb_rx_q, uint16_t nb_tx_q,
const struct rte_eth_conf *dev_conf)
(unsigned)dev_info.max_rx_pktlen);
return (-EINVAL);
}
+ else if (dev_conf->rxmode.max_rx_pkt_len < ETHER_MIN_LEN) {
+ PMD_DEBUG_TRACE("ethdev port_id=%d max_rx_pkt_len %u"
+ " < min valid value %u\n",
+ port_id,
+ (unsigned)dev_conf->rxmode.max_rx_pkt_len,
+ (unsigned)ETHER_MIN_LEN);
+ return (-EINVAL);
+ }
} else
/* Use default value */
dev->data->dev_conf.rxmode.max_rx_pkt_len = ETHER_MAX_LEN;
}
}
- diag = (*dev->dev_ops->dev_configure)(dev, nb_rx_q, nb_tx_q);
+ /*
+ * Setup new number of RX/TX queues and reconfigure device.
+ */
+ diag = rte_eth_dev_rx_queue_config(dev, nb_rx_q);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_rx_queue_config = %d\n",
+ port_id, diag);
+ return diag;
+ }
+
+ diag = rte_eth_dev_tx_queue_config(dev, nb_tx_q);
if (diag != 0) {
- rte_free(dev->data->rx_queues);
- rte_free(dev->data->tx_queues);
+ PMD_DEBUG_TRACE("port%d rte_eth_dev_tx_queue_config = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ return diag;
}
- return diag;
+
+ diag = (*dev->dev_ops->dev_configure)(dev);
+ if (diag != 0) {
+ PMD_DEBUG_TRACE("port%d dev_configure = %d\n",
+ port_id, diag);
+ rte_eth_dev_rx_queue_config(dev, 0);
+ rte_eth_dev_tx_queue_config(dev, 0);
+ return diag;
+ }
+
+ return 0;
}
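For context, a minimal caller-side sketch of the reworked rte_eth_dev_configure(), assuming the usual <rte_ethdev.h>/<rte_debug.h> includes; the port number and queue counts are illustrative, and a real application would fill port_conf with its RX/TX mode settings first:

	struct rte_eth_conf port_conf;
	int diag;

	memset(&port_conf, 0, sizeof(port_conf));
	/* Ask for 2 RX and 2 TX queues on port 0; the queue pointer arrays
	 * are (re)allocated above before the PMD's dev_configure hook runs. */
	diag = rte_eth_dev_configure(0, 2, 2, &port_conf);
	if (diag != 0)
		rte_exit(EXIT_FAILURE, "port 0: rte_eth_dev_configure = %d\n", diag);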
static void
* in a multi-process setup*/
PROC_PRIMARY_OR_ERR_RET(-E_RTE_SECONDARY);
- if (port_id >= nb_ports) {
+ if (port_id >= RTE_MAX_ETHPORTS || port_id >= nb_ports) {
PMD_DEBUG_TRACE("Invalid port_id=%d\n", port_id);
return (-EINVAL);
}
struct rte_eth_rxconf {
struct rte_eth_thresh rx_thresh; /**< RX ring threshold registers. */
uint16_t rx_free_thresh; /**< Drives the freeing of RX descriptors. */
+ uint8_t rx_drop_en; /**< Drop packets if no descriptors are available. */
};
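A hedged usage sketch for the new rx_drop_en field (the threshold values, ring size, and the mempool mb_pool are illustrative assumptions): it is simply set in the per-queue config handed to rte_eth_rx_queue_setup().

	struct rte_eth_rxconf rx_conf = {
		.rx_thresh = { .pthresh = 8, .hthresh = 8, .wthresh = 4 },
		.rx_free_thresh = 32,
		/* drop incoming packets instead of stalling the port when
		 * no RX descriptors are available on this queue */
		.rx_drop_en = 1,
	};

	diag = rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(),
			&rx_conf, mb_pool);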
+#define ETH_TXQ_FLAGS_NOMULTSEGS 0x0001 /**< nb_segs=1 for all mbufs */
+#define ETH_TXQ_FLAGS_NOREFCOUNT 0x0002 /**< refcnt can be ignored */
+#define ETH_TXQ_FLAGS_NOMULTMEMP 0x0004 /**< all bufs come from same mempool */
+#define ETH_TXQ_FLAGS_NOVLANOFFL 0x0100 /**< disable VLAN offload */
+#define ETH_TXQ_FLAGS_NOXSUMSCTP 0x0200 /**< disable SCTP checksum offload */
+#define ETH_TXQ_FLAGS_NOXSUMUDP 0x0400 /**< disable UDP checksum offload */
+#define ETH_TXQ_FLAGS_NOXSUMTCP 0x0800 /**< disable TCP checksum offload */
+#define ETH_TXQ_FLAGS_NOOFFLOADS \
+ (ETH_TXQ_FLAGS_NOVLANOFFL | ETH_TXQ_FLAGS_NOXSUMSCTP | \
+ ETH_TXQ_FLAGS_NOXSUMUDP | ETH_TXQ_FLAGS_NOXSUMTCP)
/**
* A structure used to configure a TX ring of an Ethernet port.
*/
struct rte_eth_thresh tx_thresh; /**< TX ring threshold registers. */
uint16_t tx_rs_thresh; /**< Drives the setting of RS bit on TXDs. */
uint16_t tx_free_thresh; /**< Drives the freeing of TX buffers. */
+ uint32_t txq_flags; /**< Set of ETH_TXQ_FLAGS_* flags for the TX queue. */
};
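A sketch of combining the flags above (threshold values are illustrative): an application that transmits single-segment mbufs drawn from one mempool, never shares their refcounts, and needs no offloads can say so, letting a PMD choose a leaner TX path.

	struct rte_eth_txconf tx_conf = {
		.tx_thresh = { .pthresh = 36, .hthresh = 0, .wthresh = 0 },
		.tx_rs_thresh = 0,	/* 0 selects the driver default */
		.tx_free_thresh = 0,	/* 0 selects the driver default */
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOMULTMEMP |
			     ETH_TXQ_FLAGS_NOREFCOUNT |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};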
/**
};
struct rte_eth_dev;
-struct igb_rx_queue;
-struct igb_tx_queue;
struct rte_eth_dev_callback;
/** @internal Structure to keep track of registered callbacks */
* structure associated with an Ethernet device.
*/
-typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev, uint16_t nb_rx_q,
- uint16_t nb_tx_q);
+typedef int (*eth_dev_configure_t)(struct rte_eth_dev *dev);
/**< @internal Ethernet device configuration. */
typedef int (*eth_dev_start_t)(struct rte_eth_dev *dev);
const struct rte_eth_txconf *tx_conf);
/**< @internal Setup a transmit queue of an Ethernet device. */
+typedef void (*eth_queue_release_t)(void *queue);
+/**< @internal Release memory resources allocated by given RX/TX queue. */
+
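A sketch of how a PMD might implement this hook (the foo driver name and its queue struct are hypothetical): the void * argument is whatever pointer the driver stored in dev->data->rx_queues[] at setup time, and the same callback type serves both RX and TX.

	static void
	foo_rx_queue_release(void *rxq)
	{
		struct foo_rx_queue *q = rxq;	/* hypothetical driver-private type */

		if (q == NULL)
			return;
		/* free the software ring first, then the queue structure itself */
		rte_free(q->sw_ring);
		rte_free(q);
	}

The driver then points the rx_queue_release/tx_queue_release members of its eth_dev_ops at such functions.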
typedef void (*vlan_filter_set_t)(struct rte_eth_dev *dev,
uint16_t vlan_id,
int on);
/**< @internal filtering of a VLAN Tag Identifier by an Ethernet device. */
-typedef uint16_t (*eth_rx_burst_t)(struct igb_rx_queue *rxq,
+typedef uint16_t (*eth_rx_burst_t)(void *rxq,
struct rte_mbuf **rx_pkts,
uint16_t nb_pkts);
/**< @internal Retrieve input packets from a receive queue of an Ethernet device. */
-typedef uint16_t (*eth_tx_burst_t)(struct igb_tx_queue *txq,
+typedef uint16_t (*eth_tx_burst_t)(void *txq,
struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
/**< @internal Send output packets on a transmit queue of an Ethernet device. */
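With the queue argument now untyped, each driver casts back to its private queue structure inside its burst functions; a minimal sketch with hypothetical names:

	static uint16_t
	foo_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
	{
		struct foo_rx_queue *rxq = rx_queue;
		uint16_t nb_rx = 0;

		/* ... scan the HW ring and fill rx_pkts[0..nb_rx-1] ... */
		(void)rxq;
		return nb_rx;
	}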
eth_dev_infos_get_t dev_infos_get; /**< Get device info. */
vlan_filter_set_t vlan_filter_set; /**< Filter VLAN Setup. */
eth_rx_queue_setup_t rx_queue_setup;/**< Set up device RX queue.*/
+ eth_queue_release_t rx_queue_release;/**< Release RX queue.*/
eth_tx_queue_setup_t tx_queue_setup;/**< Set up device TX queue.*/
+ eth_queue_release_t tx_queue_release;/**< Release TX queue.*/
eth_dev_led_on_t dev_led_on; /**< Turn on LED. */
eth_dev_led_off_t dev_led_off; /**< Turn off LED. */
flow_ctrl_set_t flow_ctrl_set; /**< Setup flow control. */
* processes in a multi-process configuration.
*/
struct rte_eth_dev_data {
- struct igb_rx_queue **rx_queues; /**< Array of pointers to RX queues. */
- struct igb_tx_queue **tx_queues; /**< Array of pointers to TX queues. */
+ void **rx_queues; /**< Array of pointers to RX queues. */
+ void **tx_queues; /**< Array of pointers to TX queues. */
uint16_t nb_rx_queues; /**< Number of RX queues. */
uint16_t nb_tx_queues; /**< Number of TX queues. */
 * The *tx_rs_thresh* value should be less than or equal to the
 * *tx_free_thresh* value, and both of them should be less than
 * *nb_tx_desc* - 3.
+ * - The *txq_flags* member contains flags to pass to the TX queue setup
+ * function to configure the behavior of the TX queue. This should be set
+ * to 0 if no special configuration is required.
*
* Note that setting *tx_free_thresh* or *tx_rs_thresh* value to 0 forces
* the transmit function to use default values.
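A hedged sketch of relying on those defaults (the descriptor count and socket id are illustrative): zeroing both thresholds and txq_flags requests the driver's stock behavior.

	struct rte_eth_txconf tx_conf = {
		.tx_rs_thresh = 0,	/* 0 forces the driver default */
		.tx_free_thresh = 0,	/* 0 forces the driver default */
		.txq_flags = 0,		/* no special TX queue behavior requested */
	};

	diag = rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), &tx_conf);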