hw->bufs_per_pkt = 1;
/* Check h/w version compatibility with driver. */
- ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
- PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
- if (ver & 0x1)
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
+ PMD_INIT_LOG(DEBUG, "Harware version : %d\n", ver);
+ if (ver & 0x1)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS, 1);
- else {
+ else {
PMD_INIT_LOG(ERR, "Uncompatiable h/w version, should be 0x1\n");
return -EIO;
- }
+ }
- /* Check UPT version compatibility with driver. */
- ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
- PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
- if (ver & 0x1)
+ /* Check UPT version compatibility with driver. */
+ ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
+ PMD_INIT_LOG(DEBUG, "UPT harware version : %d\n", ver);
+ if (ver & 0x1)
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
- else {
+ else {
PMD_INIT_LOG(ERR, "Incompatiable UPT version.\n");
return -EIO;
- }
+ }
/* Getting MAC Address */
mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
- VMXNET3_MAX_MAC_ADDRS, 0);
+ VMXNET3_MAX_MAC_ADDRS, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
return -ENOMEM;
}
/* Copy the permanent MAC address */
ether_addr_copy((struct ether_addr *) hw->perm_addr,
&eth_dev->data->mac_addrs[0]);
- PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
- hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
- hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+ PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
/* Put device in Quiesce Mode */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
{
const struct rte_memzone *mz;
struct vmxnet3_hw *hw =
- VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
size_t size;
PMD_INIT_FUNC_TRACE();
if (dev->data->nb_rx_queues > UINT8_MAX ||
- dev->data->nb_tx_queues > UINT8_MAX)
- return (-EINVAL);
+ dev->data->nb_tx_queues > UINT8_MAX)
+ return -EINVAL;
- size = dev->data->nb_rx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
-        dev->data->nb_tx_queues * sizeof (struct Vmxnet3_RxQueueDesc);
+ size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+        dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);
if (size > UINT16_MAX)
- return (-EINVAL);
+ return -EINVAL;
hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;
* Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
* on current socket
*/
- mz = gpa_zone_reserve(dev, sizeof (struct Vmxnet3_DriverShared),
- "shared", rte_socket_id(), 8);
+ mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
+ "shared", rte_socket_id(), 8);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating shared zone\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
hw->sharedPA = mz->phys_addr;
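+ /* sharedPA is handed to the device via VMXNET3_REG_DSAL/DSAH at start. */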
/*
- * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
- * on current socket
- */
+ * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
+ * on current socket
+ */
mz = gpa_zone_reserve(dev, size, "queuedesc",
- rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
+ rte_socket_id(), VMXNET3_QUEUE_DESC_ALIGN);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
hw->queueDescPA = mz->phys_addr;
hw->queue_desc_len = (uint16_t)size;
- if(dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
/* Allocate memory structure for UPT1_RSSConf and configure */
- mz = gpa_zone_reserve(dev, sizeof (struct VMXNET3_RSSConf), "rss_conf",
- rte_socket_id(), CACHE_LINE_SIZE);
+ mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
+ rte_socket_id(), CACHE_LINE_SIZE);
if (mz == NULL) {
- PMD_INIT_LOG(ERR, "ERROR: Creating rss_conf structure zone\n");
- return (-ENOMEM);
+ PMD_INIT_LOG(ERR,
+ "ERROR: Creating rss_conf structure zone\n");
+ return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
/* Setting up Guest OS information */
devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
- VMXNET3_GOS_BITS_32 :
- VMXNET3_GOS_BITS_64;
+ VMXNET3_GOS_BITS_32 :
+ VMXNET3_GOS_BITS_64;
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
devRead->misc.numRxQueues = hw->num_rx_queues;
/*
- * Set number of interrupts to 1
- * PMD disables all the interrupts but this is MUST to activate device
- * It needs at least one interrupt for link events to handle
- * So we'll disable it later after device activation if needed
- */
+ * Set number of interrupts to 1.
+ * The PMD disables all interrupts, but at least one is required to
+ * activate the device and to handle link events, so we disable it
+ * again after activation if it is not needed.
+ */
devRead->intrConf.numIntrs = 1;
devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
devRead->rxFilterConf.rxMode = 0;
/* Setting up feature flags */
- if(dev->data->dev_conf.rxmode.hw_ip_checksum) {
+ if (dev->data->dev_conf.rxmode.hw_ip_checksum)
devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;
- }
- if(dev->data->dev_conf.rxmode.hw_vlan_strip) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_strip)
devRead->misc.uptFeatures |= VMXNET3_F_RXVLAN;
- }
- if(port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
+ if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
ret = vmxnet3_rss_configure(dev);
- if(ret != VMXNET3_SUCCESS) {
+ if (ret != VMXNET3_SUCCESS)
return ret;
- }
+
devRead->misc.uptFeatures |= VMXNET3_F_RSS;
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
devRead->rssConfDesc.confPA = hw->rss_confPA;
}
- if(dev->data->dev_conf.rxmode.hw_vlan_filter) {
+ if (dev->data->dev_conf.rxmode.hw_vlan_filter) {
ret = vmxnet3_vlan_configure(dev);
- if(ret != VMXNET3_SUCCESS) {
+ if (ret != VMXNET3_SUCCESS)
return ret;
- }
}
- PMD_INIT_LOG(DEBUG, "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x \n",
- hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
- hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
+ PMD_INIT_LOG(DEBUG,
+ "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x\n",
+ hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
+ hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);
/* Write MAC Address back to device */
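+ /* The 6-byte MAC is programmed as two 32-bit words (MACL, then MACH). */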
mac_ptr = (uint32_t *)hw->perm_addr;
PMD_INIT_FUNC_TRACE();
ret = vmxnet3_setup_driver_shared(dev);
- if(ret != VMXNET3_SUCCESS) {
+ if (ret != VMXNET3_SUCCESS)
return ret;
- }
/* Exchange shared data with device */
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
- hw->sharedPA));
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
- hw->sharedPA));
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
+ VMXNET3_GET_ADDR_LO(hw->sharedPA));
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
+ VMXNET3_GET_ADDR_HI(hw->sharedPA));
- /* Activate device by register write */
+ /* Activate device by register write */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
status = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
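+ /* Reading CMD back returns the activation status; 0 means success. */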
* Update RxMode of the device
*/
ret = vmxnet3_dev_rxtx_init(dev);
- if(ret != VMXNET3_SUCCESS) {
+ if (ret != VMXNET3_SUCCESS) {
PMD_INIT_LOG(ERR, "Device receive init in %s: UNSUCCESSFUL\n", __func__);
return ret;
}
PMD_INIT_FUNC_TRACE();
- if(hw->adapter_stopped == TRUE) {
+ if (hw->adapter_stopped == TRUE) {
PMD_INIT_LOG(DEBUG, "Device already closed.\n");
return;
}
}
static void
-vmxnet3_dev_stats_get( struct rte_eth_dev *dev, struct rte_eth_stats *stats)
+vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
unsigned int i;
struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
- if(set)
+
+ if (set)
rxConf->rxMode = rxConf->rxMode | feature;
else
rxConf->rxMode = rxConf->rxMode & (~feature);
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);
}
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
}
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
struct vmxnet3_hw *hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}
vmxnet3_process_events(struct vmxnet3_hw *hw)
{
uint32_t events = hw->shared->ecr;
- if (!events){
+
+ if (!events) {
PMD_INIT_LOG(ERR, "No events to process in %s()\n", __func__);
return;
}
/*
- * ECR bits when written with 1b are cleared. Hence write
- * events back to ECR so that the bits which were set will be reset.
- */
+ * ECR bits when written with 1b are cleared. Hence write
+ * events back to ECR so that the bits which were set will be reset.
+ */
VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);
/* Check if link state has changed */
- if (events & VMXNET3_ECR_LINK){
- PMD_INIT_LOG(ERR, "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
- }
+ if (events & VMXNET3_ECR_LINK)
+ PMD_INIT_LOG(ERR,
+ "Process events in %s(): VMXNET3_ECR_LINK event\n", __func__);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
if (hw->tqd_start->status.stopped)
PMD_INIT_LOG(ERR, "tq error 0x%x\n",
- hw->tqd_start->status.error);
+ hw->tqd_start->status.error);
if (hw->rqd_start->status.stopped)
PMD_INIT_LOG(ERR, "rq error 0x%x\n",
- hw->rqd_start->status.error);
+ hw->rqd_start->status.error);
- /* Reset the device */
- /* Have to reset the device */
+ /* Have to reset the device */
}
if (events & VMXNET3_ECR_DIC)
m = __rte_mbuf_raw_alloc(mp);
__rte_mbuf_sanity_check_raw(m, RTE_MBUF_PKT, 0);
- return (m);
+ return m;
}
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
{
uint32_t avail = 0;
+
if (rxq == NULL)
return;
- PMD_RX_LOG(DEBUG, "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
- rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
- PMD_RX_LOG(DEBUG, "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
- (unsigned long)rxq->cmd_ring[0].basePA, (unsigned long)rxq->cmd_ring[1].basePA,
- (unsigned long)rxq->comp_ring.basePA);
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.\n",
+ rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
+ PMD_RX_LOG(DEBUG,
+ "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.\n",
+ (unsigned long)rxq->cmd_ring[0].basePA,
+ (unsigned long)rxq->cmd_ring[1].basePA,
+ (unsigned long)rxq->comp_ring.basePA);
avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[0]);
- PMD_RX_LOG(DEBUG, "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
- (uint32_t)rxq->cmd_ring[0].size, avail, rxq->comp_ring.next2proc,
- rxq->cmd_ring[0].size - avail);
+ PMD_RX_LOG(DEBUG,
+ "RXQ:cmd0: size=%u; free=%u; next2proc=%u; queued=%u\n",
+ (uint32_t)rxq->cmd_ring[0].size, avail,
+ rxq->comp_ring.next2proc,
+ rxq->cmd_ring[0].size - avail);
avail = vmxnet3_cmd_ring_desc_avail(&rxq->cmd_ring[1]);
PMD_RX_LOG(DEBUG, "RXQ:cmd1 size=%u; free=%u; next2proc=%u; queued=%u\n",
- (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
- rxq->cmd_ring[1].size - avail);
+ (uint32_t)rxq->cmd_ring[1].size, avail, rxq->comp_ring.next2proc,
+ rxq->cmd_ring[1].size - avail);
}
vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
{
uint32_t avail = 0;
+
if (txq == NULL)
return;
PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p.\n",
- txq->cmd_ring.base, txq->comp_ring.base);
+ txq->cmd_ring.base, txq->comp_ring.base);
PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx.\n",
- (unsigned long)txq->cmd_ring.basePA, (unsigned long)txq->comp_ring.basePA);
+ (unsigned long)txq->cmd_ring.basePA,
+ (unsigned long)txq->comp_ring.basePA);
avail = vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring);
PMD_TX_LOG(DEBUG, "TXQ: size=%u; free=%u; next2proc=%u; queued=%u\n",
- (uint32_t)txq->cmd_ring.size, avail,
- txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
+ (uint32_t)txq->cmd_ring.size, avail,
+ txq->comp_ring.next2proc, txq->cmd_ring.size - avail);
}
#endif
while (ring->next2comp != ring->next2fill) {
/* No need to worry about tx desc ownership, device is quiesced by now. */
vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
- if(buf_info->m) {
+
+ if (buf_info->m) {
rte_pktmbuf_free(buf_info->m);
buf_info->m = NULL;
buf_info->bufPA = 0;
vmxnet3_dev_tx_queue_release(void *txq)
{
vmxnet3_tx_queue_t *tq = txq;
+
if (txq != NULL) {
/* Release the cmd_ring */
vmxnet3_cmd_ring_release(&tq->cmd_ring);
{
int i;
vmxnet3_rx_queue_t *rq = rxq;
+
if (rxq != NULL) {
/* Release both the cmd_rings */
for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
if (txq != NULL) {
txq->stopped = TRUE;
vmxnet3_dev_tx_queue_release(txq);
for (i = 0; i < dev->data->nb_rx_queues; i++) {
struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];
- if(rxq != NULL) {
+
+ if (rxq != NULL) {
rxq->stopped = TRUE;
vmxnet3_dev_rx_queue_release(rxq);
}
static inline void
vmxnet3_tq_tx_complete(vmxnet3_tx_queue_t *txq)
{
- int completed = 0;
- struct rte_mbuf *mbuf;
- vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
- struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
- (comp_ring->base + comp_ring->next2proc);
+ int completed = 0;
+ struct rte_mbuf *mbuf;
+ vmxnet3_comp_ring_t *comp_ring = &txq->comp_ring;
+ struct Vmxnet3_TxCompDesc *tcd = (struct Vmxnet3_TxCompDesc *)
+ (comp_ring->base + comp_ring->next2proc);
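+ /* The gen bit flips on every ring wrap: a completion descriptor is
+  * ready for the driver only while its gen matches the ring's.
+  */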
- while (tcd->gen == comp_ring->gen) {
+ while (tcd->gen == comp_ring->gen) {
- /* Release cmd_ring descriptor and free mbuf */
+ /* Release cmd_ring descriptor and free mbuf */
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
- VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
+ VMXNET3_ASSERT(txq->cmd_ring.base[tcd->txdIdx].txd.eop == 1);
#endif
- mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
+ mbuf = txq->cmd_ring.buf_info[tcd->txdIdx].m;
if (unlikely(mbuf == NULL))
rte_panic("EOP desc does not point to a valid mbuf");
else
vmxnet3_comp_ring_adv_next2proc(comp_ring);
tcd = (struct Vmxnet3_TxCompDesc *)(comp_ring->base +
- comp_ring->next2proc);
+ comp_ring->next2proc);
completed++;
- }
+ }
- PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
+ PMD_TX_LOG(DEBUG, "Processed %d tx comps & command descs.\n", completed);
}
uint16_t
-vmxnet3_xmit_pkts( void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts)
+vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
{
uint16_t nb_tx;
Vmxnet3_TxDesc *txd = NULL;
hw = txq->hw;
- if(txq->stopped) {
+ if (txq->stopped) {
PMD_TX_LOG(DEBUG, "Tx queue is stopped.\n");
return 0;
}
vmxnet3_tq_tx_complete(txq);
nb_tx = 0;
- while(nb_tx < nb_pkts) {
+ while (nb_tx < nb_pkts) {
- if(vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
+ if (vmxnet3_cmd_ring_desc_avail(&txq->cmd_ring)) {
txm = tx_pkts[nb_tx];
- /* Don't support scatter packets yet, free them if met */
+ /* Scatter packets are not supported yet; free them if encountered */
}
- /* Needs to minus ether header len */
+ /* data_len includes the Ethernet header, which cur_mtu does not */
- if(txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
+ if (txm->pkt.data_len > (hw->cur_mtu + ETHER_HDR_LEN)) {
PMD_TX_LOG(DEBUG, "Packet data_len higher than MTU\n");
rte_pktmbuf_free(tx_pkts[nb_tx]);
txq->stats.drop_total++;
txq->shared->ctrl.txNumDeferred = 0;
/* Notify vSwitch that packets are available. */
VMXNET3_WRITE_BAR0_REG(hw, (VMXNET3_REG_TXPROD + txq->queue_id * VMXNET3_REG_ALIGN),
- txq->cmd_ring.next2fill);
+ txq->cmd_ring.next2fill);
}
- return (nb_tx);
+ return nb_tx;
}
/*
*
*/
static inline int
-vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t* rxq, uint8_t ring_id)
+vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
{
- int err = 0;
- uint32_t i = 0, val = 0;
- struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
-
- while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
+ int err = 0;
+ uint32_t i = 0, val = 0;
+ struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+
+ while (vmxnet3_cmd_ring_desc_avail(ring) > 0) {
struct Vmxnet3_RxDesc *rxd;
struct rte_mbuf *mbuf;
vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
if (ring->rid == 0) {
- /* Usually: One HEAD type buf per packet
- * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
- * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
- */
+ /* Usually: One HEAD type buf per packet
+ * val = (ring->next2fill % rxq->hw->bufs_per_pkt) ?
+ * VMXNET3_RXD_BTYPE_BODY : VMXNET3_RXD_BTYPE_HEAD;
+ */
/* We use single packet buffer so all heads here */
val = VMXNET3_RXD_BTYPE_HEAD;
*/
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len -
- RTE_PKTMBUF_HEADROOM);
+ RTE_PKTMBUF_HEADROOM);
buf_info->bufPA = RTE_MBUF_DATA_DMA_ADDR_DEFAULT(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
vmxnet3_cmd_ring_adv_next2fill(ring);
i++;
- }
+ }
- /* Return error only if no buffers are posted at present */
- if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size -1))
- return -err;
- else
- return i;
+ /* Return error only if no buffers are posted at present */
+ if (vmxnet3_cmd_ring_desc_avail(ring) >= (ring->size - 1))
+ return -err;
+ else
+ return i;
}
/*
rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
- if(rxq->stopped) {
+ if (rxq->stopped) {
PMD_RX_LOG(DEBUG, "Rx queue is stopped.\n");
return 0;
}
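+ /* Same gen-bit handshake as on the Tx side: consume completion
+  * descriptors while their gen matches the ring's.
+  */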
while (rcd->gen == rxq->comp_ring.gen) {
- if(nb_rx >= nb_pkts)
+ if (nb_rx >= nb_pkts)
break;
idx = rcd->rxdIdx;
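+ /* qid1 identifies cmd ring 0; any other rqID maps to cmd ring 1. */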
ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
- if(rcd->sop !=1 || rcd->eop != 1) {
+ if (rcd->sop != 1 || rcd->eop != 1) {
rte_pktmbuf_free_seg(rbi->m);
PMD_RX_LOG(DEBUG, "Packet spread across multiple buffers\n)");
#endif
if (rcd->len == 0) {
PMD_RX_LOG(DEBUG, "Rx buf was skipped. rxring[%d][%d]\n)",
- ring_idx, idx);
+ ring_idx, idx);
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(rcd->sop && rcd->eop);
#endif
/* Assuming a packet is coming in a single packet buffer */
if (rxd->btype != VMXNET3_RXD_BTYPE_HEAD) {
- PMD_RX_LOG(DEBUG, "Alert : Misbehaving device, incorrect "
- " buffer type used. iPacket dropped.\n");
+ PMD_RX_LOG(DEBUG,
+ "Alert : Misbehaving device, incorrect "
+ " buffer type used. iPacket dropped.\n");
rte_pktmbuf_free_seg(rbi->m);
goto rcd_done;
}
rxq->stats.drop_total++;
rxq->stats.drop_err++;
- if(!rcd->fcs) {
+ if (!rcd->fcs) {
rxq->stats.drop_fcs++;
PMD_RX_LOG(ERR, "Recv packet dropped due to frame err.\n");
}
PMD_RX_LOG(ERR, "Error in received packet rcd#:%d rxd:%d\n",
- (int)(rcd - (struct Vmxnet3_RxCompDesc *)
- rxq->comp_ring.base), rcd->rxdIdx);
+ (int)(rcd - (struct Vmxnet3_RxCompDesc *)
+ rxq->comp_ring.base), rcd->rxdIdx);
rte_pktmbuf_free_seg(rxm);
goto rcd_done;
if (rcd->ts) {
PMD_RX_LOG(ERR, "Received packet with vlan ID: %d.\n",
- rcd->tci);
+ rcd->tci);
rxm->ol_flags = PKT_RX_VLAN_PKT;
#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER
VMXNET3_ASSERT(rxm &&
- rte_pktmbuf_mtod(rxm, void *));
+ rte_pktmbuf_mtod(rxm, void *));
#endif
- //Copy vlan tag in packet buffer
+ /* Copy vlan tag in packet buffer */
rxm->pkt.vlan_macip.f.vlan_tci =
rte_le_to_cpu_16((uint16_t)rcd->tci);
vmxnet3_post_rx_bufs(rxq, ring_idx);
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
- rxq->cmd_ring[ring_idx].next2fill);
+ rxq->cmd_ring[ring_idx].next2fill);
}
/* Advance to the next descriptor in comp_ring */
rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
nb_rxd++;
if (nb_rxd > rxq->cmd_ring[0].size) {
- PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
- " relinquish control.\n");
+ PMD_RX_LOG(ERR,
+ "Used up quota of receiving packets,"
+ " relinquish control.\n");
break;
}
}
}
- return (nb_rx);
+ return nb_rx;
}
/*
int
vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
{
const struct rte_memzone *mz;
struct vmxnet3_tx_queue *txq;
struct vmxnet3_hw *hw;
- struct vmxnet3_cmd_ring *ring;
- struct vmxnet3_comp_ring *comp_ring;
- int size;
+ struct vmxnet3_cmd_ring *ring;
+ struct vmxnet3_comp_ring *comp_ring;
+ int size;
PMD_INIT_FUNC_TRACE();
hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) !=
- ETH_TXQ_FLAGS_NOMULTSEGS) {
+ ETH_TXQ_FLAGS_NOMULTSEGS) {
PMD_INIT_LOG(ERR, "TX Multi segment not support yet\n");
- return (-EINVAL);
+ return -EINVAL;
}
if ((tx_conf->txq_flags & ETH_TXQ_FLAGS_NOOFFLOADS) !=
- ETH_TXQ_FLAGS_NOOFFLOADS) {
+ ETH_TXQ_FLAGS_NOOFFLOADS) {
PMD_INIT_LOG(ERR, "TX not support offload function yet\n");
- return (-EINVAL);
+ return -EINVAL;
}
txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), CACHE_LINE_SIZE);
if (txq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate tx queue structure\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
txq->queue_id = queue_idx;
txq->port_id = dev->data->port_id;
txq->shared = &hw->tqd_start[queue_idx];
- txq->hw = hw;
- txq->qid = queue_idx;
- txq->stopped = TRUE;
+ txq->hw = hw;
+ txq->qid = queue_idx;
+ txq->stopped = TRUE;
- ring = &txq->cmd_ring;
- comp_ring = &txq->comp_ring;
+ ring = &txq->cmd_ring;
+ comp_ring = &txq->comp_ring;
- /* Tx vmxnet ring length should be between 512-4096 */
- if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
+ /* Tx vmxnet ring length should be between 512-4096 */
+ if (nb_desc < VMXNET3_DEF_TX_RING_SIZE) {
PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Min: %u\n",
- VMXNET3_DEF_TX_RING_SIZE);
+ VMXNET3_DEF_TX_RING_SIZE);
return -EINVAL;
} else if (nb_desc > VMXNET3_TX_RING_MAX_SIZE) {
PMD_INIT_LOG(ERR, "VMXNET3 Tx Ring Size Max: %u\n",
- VMXNET3_TX_RING_MAX_SIZE);
+ VMXNET3_TX_RING_MAX_SIZE);
return -EINVAL;
- } else {
+ } else {
ring->size = nb_desc;
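+ /* Round the size down to a multiple of the h/w ring alignment. */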
ring->size &= ~VMXNET3_RING_SIZE_MASK;
- }
- comp_ring->size = ring->size;
+ }
+ comp_ring->size = ring->size;
- /* Tx vmxnet rings structure initialization*/
- ring->next2fill = 0;
- ring->next2comp = 0;
- ring->gen = VMXNET3_INIT_GEN;
- comp_ring->next2proc = 0;
- comp_ring->gen = VMXNET3_INIT_GEN;
+ /* Tx vmxnet rings structure initialization */
+ ring->next2fill = 0;
+ ring->next2comp = 0;
+ ring->gen = VMXNET3_INIT_GEN;
+ comp_ring->next2proc = 0;
+ comp_ring->gen = VMXNET3_INIT_GEN;
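+ /* cmd ring and comp ring share one contiguous DMA zone; the comp
+  * ring starts right after the cmd ring (see basePA setup below).
+  */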
- size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
- size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
+ size = sizeof(struct Vmxnet3_TxDesc) * ring->size;
+ size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
- mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+ mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
ring->basePA = mz->phys_addr;
/* comp_ring initialization */
- comp_ring->base = ring->base + ring->size;
- comp_ring->basePA = ring->basePA +
- (sizeof(struct Vmxnet3_TxDesc) * ring->size);
+ comp_ring->base = ring->base + ring->size;
+ comp_ring->basePA = ring->basePA +
+ (sizeof(struct Vmxnet3_TxDesc) * ring->size);
- /* cmd_ring0 buf_info allocation */
+ /* cmd_ring0 buf_info allocation */
ring->buf_info = rte_zmalloc("tx_ring_buf_info",
- ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
+ ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
if (ring->buf_info == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating tx_buf_info structure\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
/* Update the data portion with txq */
int
vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
- uint16_t queue_idx,
- uint16_t nb_desc,
- unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mp)
+ uint16_t queue_idx,
+ uint16_t nb_desc,
+ unsigned int socket_id,
+ __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
{
const struct rte_memzone *mz;
struct vmxnet3_rx_queue *rxq;
hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
mbp_priv = (struct rte_pktmbuf_pool_private *)
- rte_mempool_get_priv(mp);
+ rte_mempool_get_priv(mp);
buf_size = (uint16_t) (mbp_priv->mbuf_data_room_size -
- RTE_PKTMBUF_HEADROOM);
+ RTE_PKTMBUF_HEADROOM);
if (dev->data->dev_conf.rxmode.max_rx_pkt_len > buf_size) {
PMD_INIT_LOG(ERR, "buf_size = %u, max_pkt_len = %u, "
- "VMXNET3 don't support scatter packets yet\n",
- buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
- return (-EINVAL);
+ "VMXNET3 don't support scatter packets yet\n",
+ buf_size, dev->data->dev_conf.rxmode.max_rx_pkt_len);
+ return -EINVAL;
}
rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), CACHE_LINE_SIZE);
if (rxq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate rx queue structure\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
rxq->mp = mp;
comp_ring = &rxq->comp_ring;
/* Rx vmxnet rings length should be between 256-4096 */
- if(nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
+ if (nb_desc < VMXNET3_DEF_RX_RING_SIZE) {
PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Min: 256\n");
return -EINVAL;
- } else if(nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
+ } else if (nb_desc > VMXNET3_RX_RING_MAX_SIZE) {
PMD_INIT_LOG(ERR, "VMXNET3 Rx Ring Size Max: 4096\n");
return -EINVAL;
} else {
mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
if (mz == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone\n");
- return (-ENOMEM);
+ return -ENOMEM;
}
memset(mz->addr, 0, mz->len);
/* comp_ring initialization */
comp_ring->base = ring1->base + ring1->size;
comp_ring->basePA = ring1->basePA + sizeof(struct Vmxnet3_RxDesc) *
- ring1->size;
+ ring1->size;
/* cmd_ring0-cmd_ring1 buf_info allocation */
- for(i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
+ for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++) {
- ring = &rxq->cmd_ring[i];
- ring->rid = i;
- snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
+ ring = &rxq->cmd_ring[i];
+ ring->rid = i;
+ snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
- ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), CACHE_LINE_SIZE);
- if (ring->buf_info == NULL) {
- PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
- return (-ENOMEM);
- }
+ ring->buf_info = rte_zmalloc(mem_name,
+                              ring->size * sizeof(vmxnet3_buf_info_t),
+                              CACHE_LINE_SIZE);
+ if (ring->buf_info == NULL) {
+ PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure\n");
+ return -ENOMEM;
+ }
}
- /* Update the data portion with rxq */
- dev->data->rx_queues[queue_idx] = rxq;
+ /* Update the data portion with rxq */
+ dev->data->rx_queues[queue_idx] = rxq;
return 0;
}
hw = VMXNET3_DEV_PRIVATE_TO_HW(dev->data->dev_private);
for (i = 0; i < hw->num_rx_queues; i++) {
-
vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];
- for(j = 0;j < VMXNET3_RX_CMDRING_SIZE;j++) {
+
+ for (j = 0; j < VMXNET3_RX_CMDRING_SIZE; j++) {
/* Passing 0 as alloc_num will allocate full ring */
ret = vmxnet3_post_rx_bufs(rxq, j);
if (ret <= 0) {
- PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
- return (-ret);
+ PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d\n", i, j);
+ return -ret;
}
/* Updating device with the index:next2fill to fill the mbufs for coming packets */
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
- rxq->cmd_ring[j].next2fill);
+ rxq->cmd_ring[j].next2fill);
}
}
rxq->stopped = FALSE;
for (i = 0; i < dev->data->nb_tx_queues; i++) {
struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];
+
txq->stopped = FALSE;
}