~~~~~~~~~~~~~~~~~~~~~~~
To perform data copies using IOAT rawdev devices, the functions
-``rte_ioat_enqueue_copy()`` and ``rte_ioat_do_copies()`` should be used.
+``rte_ioat_enqueue_copy()`` and ``rte_ioat_perform_ops()`` should be used.
Once copies have been completed, the completion will be reported back when
-the application calls ``rte_ioat_completed_copies()``.
+the application calls ``rte_ioat_completed_ops()``.
The ``rte_ioat_enqueue_copy()`` function enqueues a single copy to the
device ring for copying at a later point. The parameters to that function
While the ``rte_ioat_enqueue_copy()`` function enqueues a copy operation on
the device ring, the copy will not actually be performed until after the
-application calls the ``rte_ioat_do_copies()`` function. This function
+application calls the ``rte_ioat_perform_ops()`` function. This function
informs the device hardware of the elements enqueued on the ring, and the
device will begin to process them. It is expected that, for efficiency
reasons, a burst of operations will be enqueued to the device via multiple
-enqueue calls between calls to the ``rte_ioat_do_copies()`` function.
+enqueue calls between calls to the ``rte_ioat_perform_ops()`` function.
The following code from ``test_ioat_rawdev.c`` demonstrates how to enqueue
a burst of copies to the device and start the hardware processing of them:
return -1;
}
}
- rte_ioat_do_copies(dev_id);
+ rte_ioat_perform_ops(dev_id);
To retrieve information about completed copies, the API
-``rte_ioat_completed_copies()`` should be used. This API will return to the
+``rte_ioat_completed_ops()`` should be used. This API will return to the
application a set of completion handles passed in when the relevant copies
were enqueued.
.. code-block:: C
- if (rte_ioat_completed_copies(dev_id, 64, (void *)completed_src,
+ if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
(void *)completed_dst) != RTE_DIM(srcs)) {
- printf("Error with rte_ioat_completed_copies\n");
+ printf("Error with rte_ioat_completed_ops\n");
return -1;
}
for (i = 0; i < RTE_DIM(srcs); i++) {
* Added a per-device configuration flag to disable management
of user-provided completion handles.
+ * Renamed the ``rte_ioat_do_copies()`` API to ``rte_ioat_perform_ops()``,
+ and renamed the ``rte_ioat_completed_copies()`` API to ``rte_ioat_completed_ops()``
+ to better reflect the APIs' purposes, and remove the implication that
+ they are limited to copy operations only.
+ The old function names remain available as deprecated wrappers and
+ will be removed in a future release.
* **Updated the pipeline library for alignment with the P4 language.**
Following this change, the ``ioat_rawdev_autotest`` command has been
removed as no longer needed.
+* raw/ioat: The ``rte_ioat_do_copies()`` and
+ ``rte_ioat_completed_copies()`` functions have been renamed to
+ ``rte_ioat_perform_ops()`` and ``rte_ioat_completed_ops()`` respectively.
+
* stack: the experimental tag has been dropped from the stack library, and its
interfaces are considered stable as of DPDK 20.11.
nb_enq = ioat_enqueue_packets(pkts_burst,
nb_rx, rx_config->ioat_ids[i]);
if (nb_enq > 0)
- rte_ioat_do_copies(rx_config->ioat_ids[i]);
+ rte_ioat_perform_ops(rx_config->ioat_ids[i]);
} else {
/* Perform packet software copy, free source packets */
int ret;
function. When using hardware copy mode the packets are enqueued in
copying device's buffer using ``ioat_enqueue_packets()`` which calls
``rte_ioat_enqueue_copy()``. When all received packets are in the
-buffer the copy operations are started by calling ``rte_ioat_do_copies()``.
+buffer the copy operations are started by calling ``rte_ioat_perform_ops()``.
Function ``rte_ioat_enqueue_copy()`` operates on physical address of
the packet. Structure ``rte_mbuf`` contains only physical address to
start of the data buffer (``buf_iova``). Thus the address is adjusted
All completed copies are processed by ``ioat_tx_port()`` function. When using
-hardware copy mode the function invokes ``rte_ioat_completed_copies()``
+hardware copy mode the function invokes ``rte_ioat_completed_ops()``
on each assigned IOAT channel to gather copied packets. If software copy
mode is used the function dequeues copied packets from the rte_ring. Then each
packet MAC address is changed if it was enabled. After that copies are sent
for (i = 0; i < tx_config->nb_queues; i++) {
if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Dequeue the mbufs from the IOAT device. */
- nb_dq = rte_ioat_completed_copies(
+ nb_dq = rte_ioat_completed_ops(
tx_config->ioat_ids[i], MAX_PKT_BURST,
(void *)mbufs_src, (void *)mbufs_dst);
} else {
PRINT_ERR("Error with rte_ioat_enqueue_copy\n");
return -1;
}
- rte_ioat_do_copies(dev_id);
+ rte_ioat_perform_ops(dev_id);
usleep(10);
- if (rte_ioat_completed_copies(dev_id, 1, (void *)&completed[0],
+ if (rte_ioat_completed_ops(dev_id, 1, (void *)&completed[0],
(void *)&completed[1]) != 1) {
- PRINT_ERR("Error with rte_ioat_completed_copies\n");
+ PRINT_ERR("Error with rte_ioat_completed_ops\n");
return -1;
}
if (completed[0] != src || completed[1] != dst) {
return -1;
}
}
- rte_ioat_do_copies(dev_id);
+ rte_ioat_perform_ops(dev_id);
usleep(100);
- if (rte_ioat_completed_copies(dev_id, 64, (void *)completed_src,
+ if (rte_ioat_completed_ops(dev_id, 64, (void *)completed_src,
(void *)completed_dst) != RTE_DIM(srcs)) {
- PRINT_ERR("Error with rte_ioat_completed_copies\n");
+ PRINT_ERR("Error with rte_ioat_completed_ops\n");
return -1;
}
for (i = 0; i < RTE_DIM(srcs); i++) {
* Number of operations enqueued, either 0 or 1
*/
static inline int
+__rte_experimental
rte_ioat_enqueue_copy(int dev_id, phys_addr_t src, phys_addr_t dst,
unsigned int length, uintptr_t src_hdl, uintptr_t dst_hdl,
int fence);
/**
- * Trigger hardware to begin performing enqueued copy operations
+ * Trigger hardware to begin performing enqueued operations
*
* This API is used to write the "doorbell" to the hardware to trigger it
- * to begin the copy operations previously enqueued by rte_ioat_enqueue_copy()
+ * to begin the operations previously enqueued by rte_ioat_enqueue_copy()
*
* @param dev_id
* The rawdev device id of the ioat instance
*/
static inline void
-rte_ioat_do_copies(int dev_id);
+__rte_experimental
+rte_ioat_perform_ops(int dev_id);
/**
- * Returns details of copy operations that have been completed
+ * Returns details of operations that have been completed
*
* If the hdls_disable option was not set when the device was configured,
* the function will return to the caller the user-provided "handles" for
* NOTE: If hdls_disable configuration option for the device is set, this
* parameter is ignored.
* @param src_hdls
- * Array to hold the source handle parameters of the completed copies.
+ * Array to hold the source handle parameters of the completed ops.
* NOTE: If hdls_disable configuration option for the device is set, this
* parameter is ignored.
* @param dst_hdls
- * Array to hold the destination handle parameters of the completed copies.
+ * Array to hold the destination handle parameters of the completed ops.
* NOTE: If hdls_disable configuration option for the device is set, this
* parameter is ignored.
* @return
* to the src_hdls and dst_hdls array parameters.
*/
static inline int
-rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+__rte_experimental
+rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
uintptr_t *src_hdls, uintptr_t *dst_hdls);
/* include the implementation details from a separate file */
}
/*
- * Trigger hardware to begin performing enqueued copy operations
+ * Trigger hardware to begin performing enqueued operations
*/
static inline void
-rte_ioat_do_copies(int dev_id)
+rte_ioat_perform_ops(int dev_id)
{
struct rte_ioat_rawdev *ioat =
(struct rte_ioat_rawdev *)rte_rawdevs[dev_id].dev_private;
}
/*
- * Returns details of copy operations that have been completed
+ * Returns details of operations that have been completed
*/
static inline int
-rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+rte_ioat_completed_ops(int dev_id, uint8_t max_copies,
uintptr_t *src_hdls, uintptr_t *dst_hdls)
{
struct rte_ioat_rawdev *ioat =
return count;
}
+/**
+ * @deprecated Renamed to rte_ioat_perform_ops(); kept as a thin
+ * backward-compatibility wrapper that simply forwards to the new API.
+ */
+static inline void
+__rte_deprecated_msg("use rte_ioat_perform_ops() instead")
+rte_ioat_do_copies(int dev_id) { rte_ioat_perform_ops(dev_id); }
+
+/**
+ * @deprecated Renamed to rte_ioat_completed_ops(); kept as a thin
+ * backward-compatibility wrapper that simply forwards to the new API.
+ */
+static inline int
+__rte_deprecated_msg("use rte_ioat_completed_ops() instead")
+rte_ioat_completed_copies(int dev_id, uint8_t max_copies,
+	uintptr_t *src_hdls, uintptr_t *dst_hdls)
+{
+	return rte_ioat_completed_ops(dev_id, max_copies, src_hdls, dst_hdls);
+}
+
#endif /* _RTE_IOAT_RAWDEV_FNS_H_ */
nb_enq = ioat_enqueue_packets(pkts_burst,
nb_rx, rx_config->ioat_ids[i]);
if (nb_enq > 0)
- rte_ioat_do_copies(rx_config->ioat_ids[i]);
+ rte_ioat_perform_ops(rx_config->ioat_ids[i]);
} else {
/* Perform packet software copy, free source packets */
int ret;
for (i = 0; i < tx_config->nb_queues; i++) {
if (copy_mode == COPY_MODE_IOAT_NUM) {
			/* Dequeue the mbufs from the IOAT device. */
- nb_dq = rte_ioat_completed_copies(
+ nb_dq = rte_ioat_completed_ops(
tx_config->ioat_ids[i], MAX_PKT_BURST,
(void *)mbufs_src, (void *)mbufs_dst);
} else {
/******* Macro to mark functions and fields scheduled for removal *****/
#define __rte_deprecated __attribute__((__deprecated__))
+#define __rte_deprecated_msg(msg) __attribute__((__deprecated__(msg)))
/**
* Mark a function or variable to a weak reference.