#define CHARS_PER_UINT32 (sizeof(uint32_t))
#define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
-#define FM10K_SIMPLE_TX_FLAG ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
- ETH_TXQ_FLAGS_NOOFFLOADS)
-
static void fm10k_close_mbx_service(struct fm10k_hw *hw);
static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
}
+/* Stubs needed for linkage when vPMD is disabled */
+int __attribute__((weak))
+fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+uint16_t __attribute__((weak))
+fm10k_recv_scattered_pkts_vec(
+ __rte_unused void *rx_queue,
+ __rte_unused struct rte_mbuf **rx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
+
+int __attribute__((weak))
+fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
+{
+ return -1;
+}
+
+void __attribute__((weak))
+fm10k_rx_queue_release_mbufs_vec(
+ __rte_unused struct fm10k_rx_queue *rxq)
+{
+ return;
+}
+
+void __attribute__((weak))
+fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return;
+}
+
+int __attribute__((weak))
+fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
+{
+ return -1;
+}
+
+uint16_t __attribute__((weak))
+fm10k_xmit_pkts_vec(__rte_unused void *tx_queue,
+ __rte_unused struct rte_mbuf **tx_pkts,
+ __rte_unused uint16_t nb_pkts)
+{
+ return 0;
+}
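+
+/*
+ * Return conventions for the stubs above: the *_condition_check() and
+ * *_vec_setup() stubs return -1 (vector path unusable) and the burst
+ * stubs return 0 packets. When the vector PMD is compiled in, its
+ * strong definitions override these weak symbols at link time, e.g.
+ * (a sketch; the real implementation lives in fm10k_rxtx_vec.c):
+ *
+ *	uint16_t
+ *	fm10k_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
+ *			uint16_t nb_pkts)
+ *	{
+ *		... actual vectorized receive path ...
+ *	}
+ */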
+
/*
* reset queue to initial state, allocate software buffers used when starting
* device.
}
static const struct fm10k_txq_ops def_txq_ops = {
- .release_mbufs = tx_queue_free,
.reset = tx_queue_reset,
};
*/
FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
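+	/* Record the new queue state so the ethdev start/stop API can track it */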
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
}
return err;
/* Free mbuf and clean HW ring */
rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
FM10K_TXDCTL_ENABLE | txdctl);
FM10K_WRITE_FLUSH(hw);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
} else
err = -1;
if (tx_queue_id < dev->data->nb_tx_queues) {
tx_queue_disable(hw, tx_queue_id);
tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
}
return 0;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
- txq->ops->release_mbufs(txq);
+ tx_queue_free(txq);
}
}
if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
- return (-EINVAL);
+ return -EINVAL;
}
if (vlan_id > ETH_VLAN_ID_MAX) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
- return (-EINVAL);
+ return -EINVAL;
}
vid_idx = FM10K_VFTA_IDX(vlan_id);
if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
"in the VLAN filter table");
- return (-EINVAL);
+ return -EINVAL;
}
fm10k_mbx_lock(hw);
fm10k_mbx_unlock(hw);
if (result != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
- return (-EIO);
+ return -EIO;
}
for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
}
if (result != FM10K_SUCCESS) {
PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
- return (-EIO);
+ return -EIO;
}
if (on) {
return 0;
}
-/*
- * Create a memzone for hardware descriptor rings. Malloc cannot be used since
- * the physical address is required. If the memzone is already created, then
- * this function returns a pointer to the existing memzone.
- */
-static inline const struct rte_memzone *
-allocate_hw_ring(const char *driver_name, const char *ring_name,
- uint8_t port_id, uint16_t queue_id, int socket_id,
- uint32_t size, uint32_t align)
-{
- char name[RTE_MEMZONE_NAMESIZE];
- const struct rte_memzone *mz;
-
- snprintf(name, sizeof(name), "%s_%s_%d_%d_%d",
- driver_name, ring_name, port_id, queue_id, socket_id);
-
- /* return the memzone if it already exists */
- mz = rte_memzone_lookup(name);
- if (mz)
- return mz;
-
-#ifdef RTE_LIBRTE_XEN_DOM0
- return rte_memzone_reserve_bounded(name, size, socket_id, 0, align,
- RTE_PGSIZE_2M);
-#else
- return rte_memzone_reserve_aligned(name, size, socket_id, 0, align);
-#endif
-}
static inline int
check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
FM10K_RX_FREE_THRESH_MIN(q),
FM10K_RX_FREE_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->alloc_thresh = rx_free_thresh;
/* make sure the mempool element size can account for alignment. */
if (!mempool_element_size_valid(mp)) {
PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
- return (-EINVAL);
+ return -EINVAL;
}
/* make sure a valid number of descriptors have been requested */
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
FM10K_MULT_RX_DESC);
- return (-EINVAL);
+ return -EINVAL;
}
/*
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* setup queue */
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
if (handle_rxconf(q, conf))
- return (-EINVAL);
+ return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
- mz = allocate_hw_ring(dev->driver->pci_drv.name, "rx_ring",
- dev->data->port_id, queue_id, socket_id,
- FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC);
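+	/*
+	 * rte_eth_dma_zone_reserve() builds the memzone name from the
+	 * driver, port and queue id, returns the zone if it already
+	 * exists, and handles the Xen Dom0 bounded reservation, so the
+	 * removed allocate_hw_ring() helper is no longer needed.
+	 */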
+ mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
+ FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
+ socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- q->hw_ring_phys_addr = mz->phys_addr;
-#endif
/* Check if number of descs satisfied Vector requirement */
if (!rte_is_power_of_2(nb_desc)) {
tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
FM10K_TX_FREE_THRESH_MIN(q),
FM10K_TX_FREE_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->free_thresh = tx_free_thresh;
tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
FM10K_TX_RS_THRESH_MIN(q),
FM10K_TX_RS_THRESH_DIV(q));
- return (-EINVAL);
+ return -EINVAL;
}
q->rs_thresh = tx_rs_thresh;
"and a multiple of %u",
nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
FM10K_MULT_TX_DESC);
- return (-EINVAL);
+ return -EINVAL;
}
/*
if (dev->data->tx_queues[queue_id] != NULL) {
struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
- txq->ops->release_mbufs(txq);
+ tx_queue_free(txq);
dev->data->tx_queues[queue_id] = NULL;
}
socket_id);
if (q == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* setup queue */
q->tail_ptr = (volatile uint32_t *)
&((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
if (handle_txconf(q, conf))
- return (-EINVAL);
+ return -EINVAL;
/* allocate memory for the software ring */
q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
if (q->sw_ring == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate software ring");
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
/*
* enough to hold the maximum ring size is requested to allow for
* resizing in later calls to the queue setup function.
*/
- mz = allocate_hw_ring(dev->driver->pci_drv.name, "tx_ring",
- dev->data->port_id, queue_id, socket_id,
- FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC);
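+	/* As on the Rx side, the common ethdev helper manages the ring memzone */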
+ mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
+ FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
+ socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
q->hw_ring = mz->addr;
-#ifdef RTE_LIBRTE_XEN_DOM0
q->hw_ring_phys_addr = rte_mem_phy2mch(mz->memseg_id, mz->phys_addr);
-#else
- q->hw_ring_phys_addr = mz->phys_addr;
-#endif
/*
* allocate memory for the RS bit tracker. Enough slots to hold the
PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
rte_free(q->sw_ring);
rte_free(q);
- return (-ENOMEM);
+ return -ENOMEM;
}
dev->data->tx_queues[queue_id] = q;
struct fm10k_tx_queue *q = queue;
PMD_INIT_FUNC_TRACE();
- q->ops->release_mbufs(q);
+ tx_queue_free(q);
}
static int
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
- if ((txq->txq_flags & FM10K_SIMPLE_TX_FLAG) !=
- FM10K_SIMPLE_TX_FLAG) {
+ /* Check whether this queue satisfies the vector Tx requirements */
+ if (fm10k_tx_vec_condition_check(txq)) {
use_sse = 0;
break;
}
}
if (use_sse) {
+ PMD_INIT_LOG(DEBUG, "Use vector Tx func");
for (i = 0; i < dev->data->nb_tx_queues; i++) {
txq = dev->data->tx_queues[i];
fm10k_txq_vec_setup(txq);
}
dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
- } else
+ } else {
dev->tx_pkt_burst = fm10k_xmit_pkts;
+ PMD_INIT_LOG(DEBUG, "Use regular Tx func");
+ }
}
static void __attribute__((cold))
dev->rx_pkt_burst = fm10k_recv_pkts_vec;
} else if (dev->data->scattered_rx)
dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
+ else
+ dev->rx_pkt_burst = fm10k_recv_pkts;
rx_using_sse =
(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
dev->rx_pkt_burst == fm10k_recv_pkts_vec);
+ if (rx_using_sse)
+ PMD_INIT_LOG(DEBUG, "Use vector Rx func");
+ else
+ PMD_INIT_LOG(DEBUG, "Use regular Rx func");
+
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];
if (rte_eal_process_type() != RTE_PROC_PRIMARY)
return 0;
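+	/* Copy device info (driver name, NUMA node, flags) from the PCI device */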
+ rte_eth_copy_pci_info(dev, dev->pci_dev);
+
macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
memset(macvlan, 0, sizeof(*macvlan));
/* Vendor and Device ID need to be set before init of shared code */