Make IO memory allocation NUMA aware: txq and rxq descriptors and
other IO resources are allocated on the socket id passed in by the
queue setup ops, which improves performance in cross-NUMA scenarios.

Cc: stable@dpdk.org
Signed-off-by: Xiaoyun Wang <cloud.wangxiaoyun@huawei.com>
15 files changed:
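Note for reviewers: below is a minimal standalone sketch of the allocation
pattern this patch moves to, reserving zeroed, IOVA-contiguous memory on a
caller-supplied NUMA socket instead of always using SOCKET_ID_ANY. It is not
driver code; the helper name example_dma_zalloc and its parameters are
illustrative only.

#include <string.h>
#include <stdio.h>

#include <rte_memzone.h>
#include <rte_memory.h>
#include <rte_errno.h>

/* Illustrative helper: reserve DMA-able memory on 'socket_id'.
 * Passing SOCKET_ID_ANY keeps the old node-agnostic behaviour.
 * Memzone names must be unique, so a real caller (like the driver)
 * appends an allocation counter to the name.
 */
static void *
example_dma_zalloc(const char *name, size_t size, unsigned int align,
		   int socket_id, rte_iova_t *iova)
{
	const struct rte_memzone *mz;

	/* IOVA-contiguous so the NIC can DMA to it as one region */
	mz = rte_memzone_reserve_aligned(name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (mz == NULL) {
		printf("memzone %s alloc failed, rte_errno: %d\n",
		       name, rte_errno);
		return NULL;
	}

	memset(mz->addr, 0, size);
	*iova = mz->iova;
	return mz->addr;
}

A queue setup op would forward its socket_id argument (or the PCI device's
numa_node) down to such a helper, which is what the hinic Rx/Tx queue setup
paths now do via hinic_create_rq()/hinic_create_sq() and hinic_wq_allocate().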
}
void *dma_zalloc_coherent(void *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flag);
-void *dma_zalloc_coherent_aligned(void *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
-void *dma_zalloc_coherent_aligned256k(void *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag);
+ unsigned int socket_id);
+
+void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int socket_id);
+
+void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int socket_id);
+
void dma_free_coherent(void *dev, size_t size, void *virt, dma_addr_t phys);
/* dma pool alloc and free */
#define pci_pool dma_pool
-#define pci_pool_alloc(pool, flags, handle) dma_pool_alloc(pool, flags, handle)
+#define pci_pool_alloc(pool, handle) dma_pool_alloc(pool, handle)
#define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr)
struct dma_pool *dma_pool_create(const char *name, void *dev, size_t size,
size_t align, size_t boundary);
void dma_pool_destroy(struct dma_pool *pool);
-void *dma_pool_alloc(struct pci_pool *pool, int flags, dma_addr_t *dma_addr);
+void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr);
void dma_pool_free(struct pci_pool *pool, void *vaddr, dma_addr_t dma);
#define kzalloc(size, flag) rte_zmalloc(NULL, size, HINIC_MEM_ALLOC_ALIGN_MIN)
cmd_vaddr_alloc = dma_zalloc_coherent(dev, (API_CMD_BUF_SIZE +
API_PAYLOAD_ALIGN_SIZE),
- &cmd_paddr, GFP_KERNEL);
+ &cmd_paddr, SOCKET_ID_ANY);
if (!cmd_vaddr_alloc) {
PMD_DRV_LOG(ERR, "Allocate API CMD dma memory failed");
return -ENOMEM;
node_vaddr_alloc = dma_zalloc_coherent(dev, (chain->cell_size +
API_CMD_NODE_ALIGN_SIZE),
- &node_paddr, GFP_KERNEL);
+ &node_paddr, SOCKET_ID_ANY);
if (!node_vaddr_alloc) {
PMD_DRV_LOG(ERR, "Allocate dma API CMD cell failed");
return -ENOMEM;
chain->wb_status = (struct hinic_api_cmd_status *)
dma_zalloc_coherent(dev, sizeof(*chain->wb_status),
- &chain->wb_status_paddr,
- GFP_KERNEL);
+ &chain->wb_status_paddr, SOCKET_ID_ANY);
if (!chain->wb_status) {
PMD_DRV_LOG(ERR, "Allocate DMA wb status failed");
err = -ENOMEM;
- cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, GFP_KERNEL,
- &cmd_buf->dma_addr);
+ cmd_buf->buf = pci_pool_alloc(cmdqs->cmd_buf_pool, &cmd_buf->dma_addr);
if (!cmd_buf->buf) {
PMD_DRV_LOG(ERR, "Allocate cmd from the pool failed");
goto alloc_pci_buf_err;
eq->virt_addr[pg_num] =
(u8 *)dma_zalloc_coherent_aligned(eq->hwdev,
eq->page_size, &eq->dma_addr[pg_num],
if (!eq->virt_addr[pg_num]) {
err = -ENOMEM;
goto dma_alloc_err;
-static void *
-hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
- dma_addr_t *dma_handle, unsigned int flag, unsigned int align)
+static void *hinic_dma_mem_zalloc(struct hinic_hwdev *hwdev, size_t size,
+ dma_addr_t *dma_handle, unsigned int align,
+ unsigned int socket_id)
{
int rc, alloc_cnt;
const struct rte_memzone *mz;
snprintf(z_name, sizeof(z_name), "%s_%d",
hwdev->pcidev_hdl->name, alloc_cnt);
- mz = rte_memzone_reserve_aligned(z_name, size, SOCKET_ID_ANY,
- flag, align);
+ mz = rte_memzone_reserve_aligned(z_name, size, socket_id,
+ RTE_MEMZONE_IOVA_CONTIG, align);
if (!mz) {
PMD_DRV_LOG(ERR, "Alloc dma able memory failed, errno: %d, ma_name: %s, size: 0x%zx",
rte_errno, z_name, size);
(void)rte_memzone_free(mz);
}
-void *dma_zalloc_coherent(void *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+void *dma_zalloc_coherent(void *hwdev, size_t size, dma_addr_t *dma_handle,
+ unsigned int socket_id)
- return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
- RTE_CACHE_LINE_SIZE);
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
+ RTE_CACHE_LINE_SIZE, socket_id);
}
void *dma_zalloc_coherent_aligned(void *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle, unsigned int socket_id)
- return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
- HINIC_PAGE_SIZE);
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle, HINIC_PAGE_SIZE,
+ socket_id);
}
void *dma_zalloc_coherent_aligned256k(void *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag)
+ dma_addr_t *dma_handle,
+ unsigned int socket_id)
- return hinic_dma_mem_zalloc(hwdev, size, dma_handle, flag,
- HINIC_PAGE_SIZE * 64);
+ return hinic_dma_mem_zalloc(hwdev, size, dma_handle,
+ HINIC_PAGE_SIZE * 64, socket_id);
}
void dma_free_coherent(void *hwdev, size_t size, void *virt, dma_addr_t phys)
-void *dma_pool_alloc(struct pci_pool *pool, int flags, dma_addr_t *dma_addr)
+void *dma_pool_alloc(struct pci_pool *pool, dma_addr_t *dma_addr)
- buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size,
- dma_addr, flags, (u32)pool->align);
+ buf = hinic_dma_mem_zalloc(pool->hwdev, pool->elem_size, dma_addr,
+ (u32)pool->align, SOCKET_ID_ANY);
if (buf)
rte_atomic32_inc(&pool->inuse);
struct hinic_msg_pf_to_mgmt *pf_to_mgmt;
struct hinic_cmdqs *cmdqs;
struct hinic_nic_io *nic_io;
};
int hinic_osdep_init(struct hinic_hwdev *hwdev);
struct hinic_hwif *hwif = hwdev->hwif;
u32 addr_h, addr_l;
- send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev,
- MBOX_WB_STATUS_LEN,
- &send_mbox->wb_paddr,
- GFP_KERNEL);
+ send_mbox->wb_vaddr = dma_zalloc_coherent(hwdev, MBOX_WB_STATUS_LEN,
+ &send_mbox->wb_paddr, SOCKET_ID_ANY);
if (!send_mbox->wb_vaddr) {
PMD_DRV_LOG(ERR, "Allocating memory for mailbox wb status failed");
return -ENOMEM;
static int hinic_alloc_nicio(struct hinic_hwdev *hwdev)
{
- int err;
- u16 max_qps, num_qp;
struct hinic_nic_io *nic_io = hwdev->nic_io;
+ struct rte_pci_device *pdev = hwdev->pcidev_hdl;
+ u16 max_qps, num_qp;
+ int err;
max_qps = hinic_func_max_qnum(hwdev);
if ((max_qps & (max_qps - 1))) {
- nic_io->ci_vaddr_base =
- dma_zalloc_coherent(hwdev,
+ nic_io->ci_vaddr_base = dma_zalloc_coherent(hwdev,
CI_TABLE_SIZE(num_qp, HINIC_PAGE_SIZE),
- &nic_io->ci_dma_base, GFP_KERNEL);
+ &nic_io->ci_dma_base,
+ pdev->device.numa_node);
if (!nic_io->ci_vaddr_base) {
PMD_DRV_LOG(ERR, "Failed to allocate ci area");
err = -ENOMEM;
wq->queue_buf_vaddr = 0;
}
-static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq)
+static int alloc_wq_pages(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
+ unsigned int socket_id)
{
dma_addr_t dma_addr = 0;
wq->queue_buf_vaddr = (u64)(u64 *)
dma_zalloc_coherent_aligned256k(hwdev, wq->wq_buf_size,
- &dma_addr, GFP_KERNEL);
+ &dma_addr, socket_id);
if (!wq->queue_buf_vaddr) {
PMD_DRV_LOG(ERR, "Failed to allocate wq page");
return -ENOMEM;
}
int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
- u32 wqebb_shift, u16 q_depth)
+ u32 wqebb_shift, u16 q_depth, unsigned int socket_id)
- err = alloc_wq_pages(hwdev, wq);
+ err = alloc_wq_pages(hwdev, wq, socket_id);
if (err) {
PMD_DRV_LOG(ERR, "Failed to allocate wq pages");
return err;
wq[i].wq_buf_size = wq_buf_size;
wq[i].q_depth = q_depth;
- err = alloc_wq_pages(hwdev, &wq[i]);
+ err = alloc_wq_pages(hwdev, &wq[i], SOCKET_ID_ANY);
if (err) {
PMD_DRV_LOG(ERR, "Failed to alloc CMDQ blocks");
goto cmdq_block_err;
int cmdq_blocks);
int hinic_wq_allocate(struct hinic_hwdev *hwdev, struct hinic_wq *wq,
- u32 wqebb_shift, u16 q_depth);
+ u32 wqebb_shift, u16 q_depth, unsigned int socket_id);
void hinic_wq_free(struct hinic_hwdev *hwdev, struct hinic_wq *wq);
#define HINIC_DEFAULT_BURST_SIZE 32
#define HINIC_DEFAULT_NB_QUEUES 1
#define HINIC_DEFAULT_RING_SIZE 1024
+#define HINIC_MAX_LRO_SIZE 65536
/*
* vlan_id is a 12 bit number.
nic_dev->rxqs[queue_idx] = rxq;
/* alloc rx sq hw wqepage*/
- rc = hinic_create_rq(hwdev, queue_idx, rq_depth);
+ rc = hinic_create_rq(hwdev, queue_idx, rq_depth, socket_id);
if (rc) {
PMD_DRV_LOG(ERR, "Create rxq[%d] failed, dev_name: %s, rq_depth: %d",
queue_idx, dev->data->name, rq_depth);
rxq->q_depth = rq_depth;
rxq->buf_len = (u16)buf_size;
rxq->rx_free_thresh = rx_free_thresh;
+ rxq->socket_id = socket_id;
/* the last point cant do mbuf rearm in bulk */
rxq->rxinfo_align_end = rxq->q_depth - rxq->rx_free_thresh;
nic_dev->txqs[queue_idx] = txq;
/* alloc tx sq hw wqepage */
- rc = hinic_create_sq(hwdev, queue_idx, sq_depth);
+ rc = hinic_create_sq(hwdev, queue_idx, sq_depth, socket_id);
if (rc) {
PMD_DRV_LOG(ERR, "Create txq[%d] failed, dev_name: %s, sq_depth: %d",
queue_idx, dev->data->name, sq_depth);
txq->sq_bot_sge_addr = HINIC_GET_WQ_TAIL(txq) -
sizeof(struct hinic_sq_bufdesc);
txq->cos = nic_dev->default_cos;
+ txq->socket_id = socket_id;
/* alloc software txinfo */
rc = hinic_setup_tx_resources(txq);
nic_dev->hwdev->nic_io->rq_buf_size = buf_size;
}
-int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, u16 rq_depth)
+int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 rq_depth, unsigned int socket_id)
{
int err;
struct hinic_nic_io *nic_io = hwdev->nic_io;
nic_io->rq_depth = rq_depth;
err = hinic_wq_allocate(hwdev, &nic_io->rq_wq[q_id],
- HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth);
+ HINIC_RQ_WQEBB_SHIFT, nic_io->rq_depth, socket_id);
if (err) {
PMD_DRV_LOG(ERR, "Failed to allocate WQ for RQ");
return err;
}
rq->wq = &nic_io->rq_wq[q_id];
- rq->pi_virt_addr =
- (volatile u16 *)dma_zalloc_coherent(hwdev, HINIC_PAGE_SIZE,
- &rq->pi_dma_addr,
- GFP_KERNEL);
+ rq->pi_virt_addr = (volatile u16 *)dma_zalloc_coherent(hwdev,
+ HINIC_PAGE_SIZE, &rq->pi_dma_addr, socket_id);
if (!rq->pi_virt_addr) {
PMD_DRV_LOG(ERR, "Failed to allocate rq pi virt addr");
err = -ENOMEM;
memset(rxq_stats, 0, sizeof(*rxq_stats));
}
-static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq)
+static int hinic_rx_alloc_cqe(struct hinic_rxq *rxq, unsigned int socket_id)
{
size_t cqe_mem_size;
cqe_mem_size = sizeof(struct hinic_rq_cqe) * rxq->q_depth;
- rxq->cqe_start_vaddr =
- dma_zalloc_coherent(rxq->nic_dev->hwdev,
- cqe_mem_size, &rxq->cqe_start_paddr,
- GFP_KERNEL);
+ rxq->cqe_start_vaddr = dma_zalloc_coherent(rxq->nic_dev->hwdev,
+ cqe_mem_size, &rxq->cqe_start_paddr, socket_id);
if (!rxq->cqe_start_vaddr) {
PMD_DRV_LOG(ERR, "Allocate cqe dma memory failed");
return -ENOMEM;
int err, pkts;
rx_info_sz = rxq->q_depth * sizeof(*rxq->rx_info);
- rxq->rx_info = kzalloc_aligned(rx_info_sz, GFP_KERNEL);
+ rxq->rx_info = rte_zmalloc_socket("rx_info", rx_info_sz,
+ RTE_CACHE_LINE_SIZE, rxq->socket_id);
if (!rxq->rx_info)
return -ENOMEM;
- err = hinic_rx_alloc_cqe(rxq);
+ err = hinic_rx_alloc_cqe(rxq, rxq->socket_id);
if (err) {
PMD_DRV_LOG(ERR, "Allocate rx cqe failed");
goto rx_cqe_err;
hinic_rx_free_cqe(rxq);
rx_cqe_err:
+ rte_free(rxq->rx_info);
rxq->rx_info = NULL;
return err;
return;
hinic_rx_free_cqe(rxq);
+ rte_free(rxq->rx_info);
u16 rx_free_thresh;
u16 rxinfo_align_end;
unsigned long status;
struct hinic_rxq_stats rxq_stats;
void hinic_get_func_rx_buf_size(struct hinic_nic_dev *nic_dev);
-int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id, u16 rq_depth);
+int hinic_create_rq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 rq_depth, unsigned int socket_id);
void hinic_destroy_rq(struct hinic_hwdev *hwdev, u16 q_id);
u64 tx_info_sz;
tx_info_sz = txq->q_depth * sizeof(*txq->tx_info);
- txq->tx_info = kzalloc_aligned(tx_info_sz, GFP_KERNEL);
+ txq->tx_info = rte_zmalloc_socket("tx_info", tx_info_sz,
+ RTE_CACHE_LINE_SIZE, txq->socket_id);
if (!txq->tx_info)
return -ENOMEM;
if (txq->tx_info == NULL)
return;
+ rte_free(txq->tx_info);
-int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, u16 sq_depth)
+int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 sq_depth, unsigned int socket_id)
{
int err;
struct hinic_nic_io *nic_io = hwdev->nic_io;
/* alloc wq */
err = hinic_wq_allocate(nic_io->hwdev, &nic_io->sq_wq[q_id],
- HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth);
+ HINIC_SQ_WQEBB_SHIFT, nic_io->sq_depth,
+ socket_id);
if (err) {
PMD_DRV_LOG(ERR, "Failed to allocate WQ for SQ");
return err;
u16 q_id;
u16 q_depth;
u32 cos;
/* cacheline1 */
struct hinic_txq_stats txq_stats;
void hinic_txq_stats_reset(struct hinic_txq *txq);
-int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id, u16 sq_depth);
+int hinic_create_sq(struct hinic_hwdev *hwdev, u16 q_id,
+ u16 sq_depth, unsigned int socket_id);
void hinic_destroy_sq(struct hinic_hwdev *hwdev, u16 q_id);