return !internal_config.no_hugetlbfs;
}
-/* Abstraction for port I/0 privilage */
+/* Abstraction for port I/O privilege */
static int
rte_eal_iopl_init(void)
{
/*
* Fifo struct mapped in a shared memory. It describes a circular buffer FIFO
- * Write and read should wrap arround. Fifo is empty when write == read
+ * Write and read should wrap around. Fifo is empty when write == read
* Writing should never overwrite the read position
*/
struct rte_kni_fifo {
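The FIFO convention spelled out in this comment (empty when write == read, with the writer never allowed to catch the read index) is the classic one-slot-reserve circular buffer. A minimal sketch of the resulting emptiness/fullness tests, using assumed field names rather than the real struct rte_kni_fifo layout:

struct demo_fifo {
        unsigned write;     /* next slot to be written */
        unsigned read;      /* next slot to be read */
        unsigned len;       /* number of slots in the ring */
        void *buffer[0];    /* slot storage follows the header */
};

static inline int
demo_fifo_empty(const struct demo_fifo *f)
{
        return f->write == f->read;
}

static inline int
demo_fifo_full(const struct demo_fifo *f)
{
        /* One slot stays unused so "full" and "empty" remain
         * distinguishable: write must never wrap onto read. */
        return (f->write + 1) % f->len == f->read;
}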
* @param v
* A pointer to the atomic counter.
* @param dec
- * The value to be substracted from the counter.
+ * The value to be subtracted from the counter.
*/
static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
* Subtract a 64-bit value from an atomic counter and return the result.
*
* Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
- * and returns the value of v after the substraction.
+ * and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
- * The value to be substracted from the counter.
+ * The value to be subtracted from the counter.
* @return
- * The value of v after the substraction.
+ * The value of v after the subtraction.
*/
static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
* @param v
* A pointer to the atomic counter.
* @return
- * True if the result after substraction is 0; false otherwise.
+ * True if the result after subtraction is 0; false otherwise.
*/
static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
* @param v
* A pointer to the atomic counter.
* @param dec
- * The value to be substracted from the counter.
+ * The value to be subtracted from the counter.
*/
static inline void
rte_atomic64_sub(rte_atomic64_t *v, int64_t dec)
* Subtract a 64-bit value from an atomic counter and return the result.
*
* Atomically subtracts the 64-bit value (dec) from the atomic counter (v)
- * and returns the value of v after the substraction.
+ * and returns the value of v after the subtraction.
*
* @param v
* A pointer to the atomic counter.
* @param dec
- * The value to be substracted from the counter.
+ * The value to be subtracted from the counter.
* @return
- * The value of v after the substraction.
+ * The value of v after the subtraction.
*/
static inline int64_t
rte_atomic64_sub_return(rte_atomic64_t *v, int64_t dec)
* @param v
* A pointer to the atomic counter.
* @return
- * True if the result after substraction is 0; false otherwise.
+ * True if the result after subtraction is 0; false otherwise.
*/
static inline int rte_atomic64_dec_and_test(rte_atomic64_t *v)
{
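As a usage illustration of the subtract-and-test helpers documented above, a reference-counted teardown path might look like the sketch below; the demo_obj type and its free path are hypothetical, only the rte_atomic64_* calls come from the header being patched:

#include <stdlib.h>
#include <rte_atomic.h>

struct demo_obj {
        rte_atomic64_t refcnt;
        /* ... payload ... */
};

static void
demo_obj_put(struct demo_obj *obj)
{
        /* Drop one reference; rte_atomic64_dec_and_test() returns
         * non-zero only for the caller that released the last one,
         * which is therefore the only one allowed to free. */
        if (rte_atomic64_dec_and_test(&obj->refcnt))
                free(obj);
}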
} __attribute__((__packed__));
/* Flag definitions for rte_config flags */
-#define EAL_FLG_HIGH_IOPL 1 /**< indicates high IO privilage in a linux env */
+#define EAL_FLG_HIGH_IOPL 1 /**< indicates high IO privilege in a linux env */
/**
* Get the global configuration structure.
extern "C" {
#endif
-/** Interupt handle */
+/** Interrupt handle */
struct rte_intr_handle;
/** Function to be registered for the specific interrupt */
}
/*
- * Request iopl priviledge for all RPL, returns 0 on success
+ * Request iopl privilege for all RPL, returns 0 on success
*/
static int
rte_eal_iopl_init(void)
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
return -1;
- /* unkown handle type */
+ /* unknown handle type */
default:
RTE_LOG(ERR, EAL,
"Unknown handle type of fd %d\n",
/* not used at this moment */
case RTE_INTR_HANDLE_ALARM:
return -1;
- /* unkown handle type */
+ /* unknown handle type */
default:
RTE_LOG(ERR, EAL,
"Unknown handle type of fd %d\n",
}
/*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
* pointer to the mmap'd area and keep *size unmodified. Else, retry
* with a smaller zone: decrease *size by hugepage_sz until it reaches
* 0. In this case, return NULL. Note: this function returns an address
static const char sys_dir_path[] = "/sys/kernel/mm/dom0-mm/memsize-mB";
/*
- * Try to mmap *size bytes in /dev/zero. If it is succesful, return the
+ * Try to mmap *size bytes in /dev/zero. If it is successful, return the
* pointer to the mmap'd area and keep *size unmodified. Else, retry
* with a smaller zone: decrease *size by mem_size until it reaches
* 0. In this case, return NULL. Note: this function returns an address
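The shrink-and-retry mapping strategy described in these two comments can be sketched roughly as follows; the function name, the bare open/close of /dev/zero and the explicit step argument are illustrative, not the EAL code itself:

#include <fcntl.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

static void *
demo_map_decreasing(size_t *size, size_t step)
{
        void *addr = NULL;
        int fd = open("/dev/zero", O_RDONLY);

        if (fd < 0)
                return NULL;

        while (*size > 0) {
                addr = mmap(NULL, *size, PROT_READ, MAP_PRIVATE, fd, 0);
                if (addr != MAP_FAILED)
                        break;  /* success: *size holds the mapped length */
                addr = NULL;
                /* shrink the request by one step and retry */
                *size = (*size > step) ? *size - step : 0;
        }
        close(fd);
        return addr;            /* NULL if *size reached 0 */
}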
/*
* Fifo struct mapped in a shared memory. It describes a circular buffer FIFO
- * Write and read should wrap arround. Fifo is empty when write == read
+ * Write and read should wrap around. Fifo is empty when write == read
* Writing should never overwrite the read position
*/
struct rte_kni_fifo {
/**
* This is uio device mmap method which will use igbuio mmap for Xen
- * Dom0 enviroment.
+ * Dom0 environment.
*/
static int
igbuio_dom0_pci_mmap(struct uio_info *info, struct vm_area_struct *vma)
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
#define IGB_20K_ITR 196
#define IGB_70K_ITR 56
-/* Interrupt modes, as used by the IntMode paramter */
+/* Interrupt modes, as used by the IntMode parameter */
#define IGB_INT_MODE_LEGACY 0
#define IGB_INT_MODE_MSI 1
#define IGB_INT_MODE_MSIX 2
/* start with one vector for every rx queue */
numvecs = adapter->num_rx_queues;
- /* if tx handler is seperate add 1 for every tx queue */
+ /* if tx handler is separate add 1 for every tx queue */
if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
numvecs += adapter->num_tx_queues;
*
* Neither the 82576 nor the 82580 offer registers wide enough to hold
* nanoseconds time values for very long. For the 82580, SYSTIM always
- * counts nanoseconds, but the upper 24 bits are not availible. The
+ * counts nanoseconds, but the upper 24 bits are not available. The
* frequency is adjusted by changing the 32 bit fractional nanoseconds
* register, TIMINCA.
*
} else {
/*
* Write addresses to the MTA, if the attempt fails
- * then we should just turn on promiscous mode so
+ * then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
if (igb_write_mc_addr_list(adapter->netdev) != 0)
/*
* Write addresses to available RAR registers, if there is not
* sufficient space to store all the addresses then enable
- * unicast promiscous mode
+ * unicast promiscuous mode
*/
if (igb_write_vm_addr_list(dev) < 0)
vmolr |= E1000_VMOLR_UPE;
* mmd_eee_adv_to_ethtool_adv_t
* @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers
*
- * A small helper function that translates the MMD EEE Advertisment (7.60)
+ * A small helper function that translates the MMD EEE Advertisement (7.60)
* and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement
* settings.
*/
if (num_block == 0)
return -1;
- /* free memory and destory contiguous region in Xen*/
+ /* free memory and destroy contiguous region in Xen */
for (i = 0; i< num_block; i++) {
vstart = mm_data->block_info[i].vir_addr;
if (vstart) {
* context need to be stopped before calling this interface.
*
* @param kni
- * The pointer to the context of an existant KNI interface.
+ * The pointer to the context of an existing KNI interface.
*
* @return
* - 0 indicates success.
* Finally constructs the response mbuf and puts it back to the resp_q.
*
* @param kni
- * The pointer to the context of an existant KNI interface.
+ * The pointer to the context of an existing KNI interface.
*
* @return
* - 0
}
/*
- * Depending on memory configuration, objects addresses are spreaded
+ * Depending on memory configuration, object addresses are spread
* between channels and ranks in RAM: the pool allocator will add
* padding between objects. This function return the new size of the
* object.
/*
* increase trailer to add padding between objects in order to
- * spread them accross memory channels/ranks
+ * spread them across memory channels/ranks
*/
if ((flags & MEMPOOL_F_NO_SPREAD) == 0) {
unsigned new_size;
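The padding referred to here can be illustrated with a simplified sketch: grow the object footprint, counted in cache-line units, until it shares no common factor with channels * ranks, so that successive objects start on different channels. The 64-byte line size and the gcd rule below are illustrative assumptions, not the pool allocator's exact routine:

#define DEMO_CACHE_LINE 64

static unsigned
demo_gcd(unsigned a, unsigned b)
{
        while (b != 0) {
                unsigned t = b;
                b = a % b;
                a = t;
        }
        return a;
}

/* Return a padded object size so that consecutive objects do not
 * keep landing on the same memory channel/rank. */
static unsigned
demo_spread_obj_size(unsigned obj_size, unsigned nchan, unsigned nrank)
{
        unsigned lines = (obj_size + DEMO_CACHE_LINE - 1) / DEMO_CACHE_LINE;

        while (demo_gcd(lines, nchan * nrank) != 1)
                lines++;
        return lines * DEMO_CACHE_LINE;
}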
* never be used. The access to the per-lcore table is of course
* faster than the multi-producer/consumer pool. The cache can be
* disabled if the cache_size argument is set to 0; it can be useful to
- * avoid loosing objects in cache. Note that even if not used, the
+ * avoid losing objects in cache. Note that even if not used, the
* memory space for cache is always reserved in a mempool structure,
* except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
* @param private_data_size
* never be used. The access to the per-lcore table is of course
* faster than the multi-producer/consumer pool. The cache can be
* disabled if the cache_size argument is set to 0; it can be useful to
- * avoid loosing objects in cache. Note that even if not used, the
+ * avoid losing objects in cache. Note that even if not used, the
* memory space for cache is always reserved in a mempool structure,
* except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
* @param private_data_size
* never be used. The access to the per-lcore table is of course
* faster than the multi-producer/consumer pool. The cache can be
* disabled if the cache_size argument is set to 0; it can be useful to
- * avoid loosing objects in cache. Note that even if not used, the
+ * avoid losing objects in cache. Note that even if not used, the
* memory space for cache is always reserved in a mempool structure,
* except if CONFIG_RTE_MEMPOOL_CACHE_MAX_SIZE is set to 0.
* @param private_data_size
/**
* Calculate maximum amount of memory required to store given number of objects.
- * Assumes that the memory buffer will be alligned at page boundary.
+ * Assumes that the memory buffer will be aligned at page boundary.
* Note, that if object size is bigger then page size, then it assumes that
* we have a subsets of physically continuous pages big enough to store
* at least one object.
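A worst-case estimate for that page-aligned layout can be sketched as below: objects that fit in a page are assumed never to straddle a page boundary, so each partially filled page is rounded up to a whole page, and an object larger than a page is rounded up to a whole number of contiguous pages. Names and rounding details are illustrative, not the library routine:

#include <stddef.h>
#include <stdint.h>

static size_t
demo_pool_mem_size(uint32_t elt_num, size_t elt_sz, size_t pg_sz)
{
        size_t objs_per_page = pg_sz / elt_sz;   /* assumes elt_sz > 0 */

        if (objs_per_page > 0) {
                /* objects never cross a page boundary: round the
                 * object count up to whole pages */
                size_t pages = (elt_num + objs_per_page - 1) / objs_per_page;
                return pages * pg_sz;
        }

        /* object bigger than a page: each one needs a physically
         * contiguous run of pages, rounded up to a page multiple */
        return ((elt_sz + pg_sz - 1) / pg_sz) * pg_sz * elt_num;
}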
}
/* Ensure that the inter-port SWSM.SMBI lock bit is clear before
- * first NVM or PHY acess. This should be done for single-port
+ * first NVM or PHY access. This should be done for single-port
* devices, and for one port only on dual-port devices so that
* for those devices we can still use the SMBI lock to synchronize
* inter-port accesses to the PHY & NVM.
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 e1000_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
STATIC s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size,
u16 E1000_UNUSEDARG mbx_id)
* frames to be received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
- * drained a bit. Here we use an arbitary value of 1500 which will
+ * drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There
* could be several smaller frames in the buffer and if so they will
* not trigger the XON until their total number reduces the buffer
* limit for packet length, jumbo frame of any size
* can be accepted, thus we have to enable scattered
* rx if jumbo frames are enabled (or if buffer size
- * is too small to accomodate non-jumbo packets)
+ * is too small to accommodate non-jumbo packets)
* to avoid splitting packets that don't fit into
* one buffer.
*/
* frames to be received after sending an XOFF.
* - Low water mark works best when it is very near the high water mark.
* This allows the receiver to restart by sending XON when it has
- * drained a bit. Here we use an arbitary value of 1500 which will
+ * drained a bit. Here we use an arbitrary value of 1500 which will
* restart after one full frame is pulled from the buffer. There
* could be several smaller frames in the buffer and if so they will
* not trigger the XON until their total number reduces the buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
- * returns SUCCESS if it successfuly read message from buffer
+ * returns SUCCESS if it successfully read message from buffer
**/
STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size,
u16 mbx_id)
}
/**
- * ixgbe_bypass_init - Make some enviroment changes for bypass
+ * ixgbe_bypass_init - Make some environment changes for bypass
*
- * @adapter: pointer to ixgbe_adapter sturcture for access to state bits
+ * @adapter: pointer to ixgbe_adapter structure for access to state bits
*
* This function collects all the modifications needed by the bypass
* driver.
if (ret_val)
goto exit;
- /* Set AUTO back on so FW can recieve events */
+ /* Set AUTO back on so FW can receive events */
ret_val = adapter->bps.ops.bypass_set(hw, BYPASS_PAGE_CTL0,
BYPASS_MODE_OFF_M, BYPASS_AUTO);
*
* If we send a write we can't be sure it took until we can read back
* that same register. It can be a problem as some of the feilds may
- * for valid reasons change inbetween the time wrote the register and
+ * for valid reasons change between the time we wrote the register and
* we read it again to verify. So this function check everything we
* can check and then assumes it worked.
*
}
/**
- * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom addres.
+ * ixgbe_bypass_rd_eep_generic - Read the bypass FW eeprom address.
*
* @hw: pointer to hardware structure
* @addr: The bypass eeprom address to read.
}
/*
- * It executes link_update after knowing an interrupt occured.
+ * It executes link_update after knowing an interrupt occurred.
*
* @param dev
* Pointer to struct rte_eth_dev.
rxq->sw_ring, rxq->rx_ring, rxq->rx_ring_phys_addr);
/*
- * Certain constaints must be met in order to use the bulk buffer
+ * Certain constraints must be met in order to use the bulk buffer
* allocation Rx burst function.
*/
use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
};
/**
- * continous entry sequence, gather by the same mempool
+ * continuous entry sequence, gathered by the same mempool
*/
struct igb_tx_entry_seq {
const struct rte_mempool* pool;
uint64_t tx_ring_phys_addr; /**< TX ring DMA address. */
struct igb_tx_entry *sw_ring; /**< virtual address of SW ring. */
#ifdef RTE_IXGBE_INC_VECTOR
- /** continous tx entry sequence within the same mempool */
+ /** continuous tx entry sequence within the same mempool */
struct igb_tx_entry_seq *sw_ring_seq;
#endif
volatile uint32_t *tdt_reg_addr; /**< Address of TDT register. */
struct rte_eth_dev *eth_dev = NULL;
unsigned i;
- /* do some paramter checking */
+ /* do some parameter checking */
if (rx_queues == NULL && nb_rx_queues > 0)
goto error;
if (tx_queues == NULL && nb_tx_queues > 0)
virtio_dev_rxtx_start(struct rte_eth_dev *dev)
{
/*
- * Start recieve and transmit vrings
+ * Start receive and transmit vrings
* - Setup vring structure for all queues
* - Initialize descriptor for the rx vring
* - Allocate blank mbufs for the each rx descriptor
txd = (Vmxnet3_TxDesc *)(txq->cmd_ring.base + txq->cmd_ring.next2fill);
- /* Fill the tx decriptor */
+ /* Fill the tx descriptor */
tbi = txq->cmd_ring.buf_info + txq->cmd_ring.next2fill;
tbi->bufPA = RTE_MBUF_DATA_DMA_ADDR(txm);
txd->addr = tbi->bufPA;
/*
* Create shared vring between guest and host.
- * Memory is allocated through grant alloc driver, so it is not physical continous.
+ * Memory is allocated through the grant alloc driver, so it is not physically contiguous.
*/
static void *
gntalloc_vring_create(int queue_type, uint32_t size, int vtidx)
}
/*
- * If there are other enqueues in progress that preceeded us,
+ * If there are other enqueues in progress that preceded us,
* we need to wait for them to complete
*/
while (unlikely(r->prod.tail != prod_head))
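The wait shown in this last hunk is the publication step of the classic multi-producer ring enqueue. Below is a self-contained schematic of the whole sequence, using a hypothetical demo_ring rather than the real rte_ring layout, and omitting the free-space check and the write barrier a real implementation needs before updating the tail:

#include <stdint.h>
#include <rte_atomic.h>

struct demo_ring {
        volatile uint32_t prod_head;   /* next slot to reserve */
        volatile uint32_t prod_tail;   /* last published slot */
        uint32_t mask;                 /* size - 1, size a power of two */
        void *ring[];
};

/* Schematic multi-producer enqueue of n objects. */
static inline void
demo_ring_mp_enqueue(struct demo_ring *r, void * const *objs, uint32_t n)
{
        uint32_t head, next, i;

        /* reserve our slots by advancing prod_head with a CAS */
        do {
                head = r->prod_head;
                next = head + n;
        } while (rte_atomic32_cmpset(&r->prod_head, head, next) == 0);

        /* fill the reserved slots */
        for (i = 0; i < n; i++)
                r->ring[(head + i) & r->mask] = objs[i];

        /* wait until every earlier producer has published, then
         * publish ours: this is the "prod.tail != prod_head" wait
         * in the hunk above, and it keeps entries visible to
         * consumers in reservation order */
        while (r->prod_tail != head)
                ;       /* busy-wait */
        r->prod_tail = next;
}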