static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete);
+ int wait_to_complete);
static void vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
- struct rte_eth_stats *stats);
+ struct rte_eth_stats *stats);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
- struct rte_eth_dev_info *dev_info);
+ struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
.allmulticast_disable = vmxnet3_dev_allmulticast_disable,
.link_update = vmxnet3_dev_link_update,
.stats_get = vmxnet3_dev_stats_get,
- .mac_addr_set = vmxnet3_mac_addr_set,
+ .mac_addr_set = vmxnet3_mac_addr_set,
.dev_infos_get = vmxnet3_dev_info_get,
.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
.vlan_filter_set = vmxnet3_dev_vlan_filter_set,
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
- const char *post_string, int socket_id, uint16_t align)
+ const char *post_string, int socket_id, uint16_t align)
{
char z_name[RTE_MEMZONE_NAMESIZE];
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d_%s",
- dev->driver->pci_drv.driver.name,
- dev->data->port_id, post_string);
+ dev->driver->pci_drv.driver.name, dev->data->port_id, post_string);
mz = rte_memzone_lookup(z_name);
if (mz)
return mz;
- return rte_memzone_reserve_aligned(z_name, size,
- socket_id, 0, align);
+ return rte_memzone_reserve_aligned(z_name, size, socket_id, 0, align);
}
/**
struct rte_eth_link *src = link;
if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
+ *(uint64_t *)src) == 0)
return -1;
return 0;
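/*
 * For context: the cmpset above works because struct rte_eth_link packs into
 * a single 64-bit word, so the whole link status is copied atomically. A
 * minimal sketch of the matching read helper used by
 * vmxnet3_dev_atomic_read_link_status() further down (illustrative only, the
 * "example_" name is not from this patch):
 */
static int
example_atomic_read_link_status(struct rte_eth_dev *dev,
				struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;			/* caller's buffer */
	struct rte_eth_link *src = &dev->data->dev_link;	/* device's copy */

	/* rte_atomic64_cmpset() returns 0 if *dst changed underneath us */
	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}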
hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
for (i = 0; i < VMXNET3_MAX_INTRS; i++)
- VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
+ VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}
/*
/* Getting MAC Address */
mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
- memcpy(hw->perm_addr , &mac_lo, 4);
- memcpy(hw->perm_addr+4, &mac_hi, 2);
+ memcpy(hw->perm_addr, &mac_lo, 4);
+ memcpy(hw->perm_addr + 4, &mac_hi, 2);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
hw->queue_desc_len = (uint16_t)size;
if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
-
/* Allocate memory structure for UPT1_RSSConf and configure */
mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf), "rss_conf",
rte_socket_id(), RTE_CACHE_LINE_SIZE);
/* Setting up Guest OS information */
devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
- VMXNET3_GOS_BITS_32 :
- VMXNET3_GOS_BITS_64;
+ VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
devRead->misc.driverInfo.vmxnet3RevSpt = 1;
devRead->misc.driverInfo.uptVerSpt = 1;
}
vmxnet3_dev_vlan_offload_set(dev,
- ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
+ ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
vmxnet3_write_mac(hw, hw->perm_addr);
struct UPT1_TxStats *txStats = &hw->tqd_start[i].stats;
stats->q_opackets[i] = txStats->ucastPktsTxOK +
- txStats->mcastPktsTxOK +
- txStats->bcastPktsTxOK;
+ txStats->mcastPktsTxOK +
+ txStats->bcastPktsTxOK;
stats->q_obytes[i] = txStats->ucastBytesTxOK +
- txStats->mcastBytesTxOK +
- txStats->bcastBytesTxOK;
+ txStats->mcastBytesTxOK +
+ txStats->bcastBytesTxOK;
stats->opackets += stats->q_opackets[i];
stats->obytes += stats->q_obytes[i];
- stats->oerrors += txStats->pktsTxError +
- txStats->pktsTxDiscard;
+ stats->oerrors += txStats->pktsTxError + txStats->pktsTxDiscard;
}
RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
struct UPT1_RxStats *rxStats = &hw->rqd_start[i].stats;
stats->q_ipackets[i] = rxStats->ucastPktsRxOK +
- rxStats->mcastPktsRxOK +
- rxStats->bcastPktsRxOK;
+ rxStats->mcastPktsRxOK +
+ rxStats->bcastPktsRxOK;
stats->q_ibytes[i] = rxStats->ucastBytesRxOK +
- rxStats->mcastBytesRxOK +
- rxStats->bcastBytesRxOK;
+ rxStats->mcastBytesRxOK +
+ rxStats->bcastBytesRxOK;
stats->ipackets += stats->q_ipackets[i];
stats->ibytes += stats->q_ibytes[i];
}
static void
-vmxnet3_dev_info_get(__attribute__((unused))struct rte_eth_dev *dev,
+vmxnet3_dev_info_get(__rte_unused struct rte_eth_dev *dev,
struct rte_eth_dev_info *dev_info)
{
dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
/* return 0 means link status changed, -1 means not changed */
static int
-vmxnet3_dev_link_update(struct rte_eth_dev *dev, __attribute__((unused)) int wait_to_complete)
+vmxnet3_dev_link_update(struct rte_eth_dev *dev,
+ __rte_unused int wait_to_complete)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
struct rte_eth_link old, link;
uint32_t ret;
+ /* Link status doesn't change for stopped dev */
if (dev->data->dev_started == 0)
- return -1; /* Link status doesn't change for stopped dev */
+ return -1;
memset(&link, 0, sizeof(link));
vmxnet3_dev_atomic_read_link_status(dev, &old);
/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
-vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set) {
-
+vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
+{
struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
if (set)
/* Check if link state has changed */
if (events & VMXNET3_ECR_LINK)
PMD_INIT_LOG(ERR,
- "Process events in %s(): VMXNET3_ECR_LINK event", __func__);
+ "Process events in %s(): VMXNET3_ECR_LINK event",
+ __func__);
/* Check if there is an error on xmit/recv queues */
if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
- VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_QUEUE_STATUS);
+ VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_QUEUE_STATUS);
if (hw->tqd_start->status.stopped)
PMD_INIT_LOG(ERR, "tq error 0x%x",
if (events & VMXNET3_ECR_DEBUG)
PMD_INIT_LOG(ERR, "Debug event generated by device.");
-
}
#endif
ETH_RSS_NONFRAG_IPV6_TCP)
/* RSS configuration structure - shared with device through GPA */
-typedef
-struct VMXNET3_RSSConf {
+typedef struct VMXNET3_RSSConf {
uint16_t hashType;
uint16_t hashFunc;
uint16_t hashKeySize;
uint8_t indTable[VMXNET3_RSS_MAX_IND_TABLE_SIZE];
} VMXNET3_RSSConf;
-typedef
-struct vmxnet3_mf_table {
+typedef struct vmxnet3_mf_table {
void *mfTableBase; /* Multicast addresses list */
uint64_t mfTablePA; /* Physical address of the list */
uint16_t num_addrs; /* number of multicast addrs */
} vmxnet3_mf_table_t;
struct vmxnet3_hw {
-
uint8_t *hw_addr0; /* BAR0: PT-Passthrough Regs */
uint8_t *hw_addr1; /* BAR1: VD-Virtual Device Regs */
/* BAR2: MSI-X Regs */
uint64_t queueDescPA;
uint16_t queue_desc_len;
- VMXNET3_RSSConf *rss_conf;
- uint64_t rss_confPA;
- vmxnet3_mf_table_t *mf_table;
- uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
+ VMXNET3_RSSConf *rss_conf;
+ uint64_t rss_confPA;
+ vmxnet3_mf_table_t *mf_table;
+ uint32_t shadow_vfta[VMXNET3_VFT_SIZE];
#define VMXNET3_VFT_TABLE_SIZE (VMXNET3_VFT_SIZE * sizeof(uint32_t))
};
#define VMXNET3_PCI_REG(reg) (*((volatile uint32_t *)(reg)))
-static inline uint32_t vmxnet3_read_addr(volatile void *addr)
+static inline uint32_t
+vmxnet3_read_addr(volatile void *addr)
{
return VMXNET3_PCI_REG(addr);
}
void vmxnet3_dev_tx_queue_release(void *txq);
int vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
- uint16_t nb_rx_desc, unsigned int socket_id,
- const struct rte_eth_rxconf *rx_conf,
- struct rte_mempool *mb_pool);
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
int vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
- uint16_t nb_tx_desc, unsigned int socket_id,
- const struct rte_eth_txconf *tx_conf);
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
int vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev);
int vmxnet3_rss_configure(struct rte_eth_dev *dev);
uint16_t vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
uint16_t vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
- uint16_t nb_pkts);
+ uint16_t nb_pkts);
#endif /* _VMXNET3_ETHDEV_H_ */
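/*
 * For context, a minimal sketch of how the prototypes above are hooked into
 * the ethdev burst API at init time, following the usual PMD pattern of this
 * DPDK generation. The ops-structure and init-function names are assumed
 * (conventionally vmxnet3_eth_dev_ops inside eth_vmxnet3_dev_init()), not
 * quoted from this patch:
 */
eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;	/* burst receive entry point */
eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;	/* burst transmit entry point */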
}
typedef struct vmxnet3_comp_ring {
- uint32_t size;
- uint32_t next2proc;
- uint8_t gen;
- uint8_t intr_idx;
+ uint32_t size;
+ uint32_t next2proc;
+ uint8_t gen;
+ uint8_t intr_idx;
Vmxnet3_GenericDesc *base;
- uint64_t basePA;
+ uint64_t basePA;
} vmxnet3_comp_ring_t;
struct vmxnet3_data_ring {
}
struct vmxnet3_txq_stats {
- uint64_t drop_total; /* # of pkts dropped by the driver,
+ uint64_t drop_total; /* # of pkts dropped by the driver,
* the counters below track droppings due to
* different reasons
*/
- uint64_t drop_too_many_segs;
- uint64_t drop_tso;
- uint64_t tx_ring_full;
+ uint64_t drop_too_many_segs;
+ uint64_t drop_tso;
+ uint64_t tx_ring_full;
};
typedef struct vmxnet3_tx_queue {
uint32_t qid1;
uint32_t qid2;
Vmxnet3_RxQueueDesc *shared;
- struct rte_mbuf *start_seg;
- struct rte_mbuf *last_seg;
+ struct rte_mbuf *start_seg;
+ struct rte_mbuf *last_seg;
struct vmxnet3_rxq_stats stats;
bool stopped;
uint16_t queue_id; /**< Device RX queue index. */
ring->buf_info = NULL;
}
-
void
vmxnet3_dev_tx_queue_release(void *txq)
{
continue;
}
- if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+ if (txm->nb_segs == 1 &&
+ rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
struct Vmxnet3_TxDataDesc *tdd;
tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
if (copy_size)
gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
- txq->cmd_ring.next2fill *
- sizeof(struct Vmxnet3_TxDataDesc));
+ txq->cmd_ring.next2fill *
+ sizeof(struct Vmxnet3_TxDataDesc));
else
gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
/*
* Allocates mbufs and clusters. Posts rx descriptors with buffer details
* so that the device can receive packets in those buffers.
- * Ring layout:
- * Among the two rings, 1st ring contains buffers of type 0 and type1.
+ * Ring layout:
+ * Among the two rings, 1st ring contains buffers of type 0 and type 1.
* bufs_per_pkt is set such that for non-LRO cases all the buffers required
* by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
* 2nd ring contains buffers of type 1 alone. The second ring is mostly used
* only for LRO.
- *
*/
static int
vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
buf_info->m = mbuf;
buf_info->len = (uint16_t)(mbuf->buf_len -
RTE_PKTMBUF_HEADROOM);
- buf_info->bufPA =
- rte_mbuf_data_dma_addr_default(mbuf);
+ buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
/* Load Rx Descriptor with the buffer's GPA */
rxd->addr = buf_info->bufPA;
goto rcd_done;
}
-
/* Initialize newly received packet buffer */
rxm->port = rxq->port_id;
rxm->nb_segs = 1;
rcd_done:
rxq->cmd_ring[ring_idx].next2comp = idx;
- VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+ VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+ rxq->cmd_ring[ring_idx].size);
/* It's time to allocate some new buf and renew descriptors */
vmxnet3_post_rx_bufs(rxq, ring_idx);
rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
nb_rxd++;
if (nb_rxd > rxq->cmd_ring[0].size) {
- PMD_RX_LOG(ERR,
- "Used up quota of receiving packets,"
+ PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
" relinquish control.");
break;
}
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
- dev->driver->pci_drv.driver.name, ring_name,
- dev->data->port_id, queue_id);
+ dev->driver->pci_drv.driver.name, ring_name,
+ dev->data->port_id, queue_id);
mz = rte_memzone_lookup(z_name);
if (mz)
return mz;
return rte_memzone_reserve_aligned(z_name, ring_size,
- socket_id, 0, VMXNET3_RING_BA_ALIGN);
+ socket_id, 0, VMXNET3_RING_BA_ALIGN);
}
int
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+ __rte_unused const struct rte_eth_txconf *tx_conf)
{
struct vmxnet3_hw *hw = dev->data->dev_private;
const struct rte_memzone *mz;
return -EINVAL;
}
- txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+ txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+ RTE_CACHE_LINE_SIZE);
if (txq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
return -ENOMEM;
uint16_t queue_idx,
uint16_t nb_desc,
unsigned int socket_id,
- __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+ __rte_unused const struct rte_eth_rxconf *rx_conf,
struct rte_mempool *mp)
{
const struct rte_memzone *mz;
struct vmxnet3_rx_queue *rxq;
- struct vmxnet3_hw *hw = dev->data->dev_private;
+ struct vmxnet3_hw *hw = dev->data->dev_private;
struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
struct vmxnet3_comp_ring *comp_ring;
int size;
PMD_INIT_FUNC_TRACE();
- rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+ rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+ RTE_CACHE_LINE_SIZE);
if (rxq == NULL) {
PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
return -ENOMEM;
ring->rid = i;
snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
- ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+ ring->buf_info = rte_zmalloc(mem_name,
+ ring->size * sizeof(vmxnet3_buf_info_t),
+ RTE_CACHE_LINE_SIZE);
if (ring->buf_info == NULL) {
PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
return -ENOMEM;
/* Passing 0 as alloc_num will allocate full ring */
ret = vmxnet3_post_rx_bufs(rxq, j);
if (ret <= 0) {
- PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+ PMD_INIT_LOG(ERR,
+ "ERROR: Posting Rxq: %d buffers ring: %d",
+ i, j);
return -ret;
}
- /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+ /*
+ * Updating device with the index:next2fill to fill the
+ * mbufs for coming packets.
+ */
if (unlikely(rxq->shared->ctrl.updateRxProd)) {
VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
rxq->cmd_ring[j].next2fill);
dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
/* loading hashKeySize */
dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
- /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+ /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
if (port_rss_conf->rss_key == NULL) {
}
/* loading hashKey */
- memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+ memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+ dev_rss_conf->hashKeySize);
/* loading indTable */
for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {