/* Calc tick */
if (shaper_level >= HNS3_SHAPER_LVL_CNT) {
hns3_err(hw,
- "shaper_level(%d) is greater than HNS3_SHAPER_LVL_CNT(%d)",
+ "shaper_level(%u) is greater than HNS3_SHAPER_LVL_CNT(%d)",
shaper_level, HNS3_SHAPER_LVL_CNT);
return -EINVAL;
}
if (ir > hw->max_tm_rate) {
- hns3_err(hw, "rate(%d) exceeds the max rate(%d) driver "
+ hns3_err(hw, "rate(%u) exceeds the max rate(%u) driver "
"supported.", ir, hw->max_tm_rate);
return -EINVAL;
}
pause_time <= PAUSE_TIME_DIV_BY * HNS3_DEFAULT_PAUSE_TRANS_GAP)
pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
else {
- hns3_warn(hw, "pause_time(%d) is adjusted to 4", pause_time);
+ hns3_warn(hw, "pause_time(%u) is adjusted to 4", pause_time);
pause_time = PAUSE_TIME_MIN_VALUE;
pause_trans_gap = pause_time / PAUSE_TIME_DIV_BY - 1;
}
return -EINVAL;
if (nb_rx_q < num_tc) {
- hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
nb_rx_q, num_tc);
return -EINVAL;
}
if (nb_tx_q < num_tc) {
- hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
nb_tx_q, num_tc);
return -EINVAL;
}
hns3_warn(hw,
"hw_vlan_reject_tagged or hw_vlan_reject_untagged "
"configuration is not supported! Ignore these two "
- "parameters: hw_vlan_reject_tagged(%d), "
- "hw_vlan_reject_untagged(%d)",
+ "parameters: hw_vlan_reject_tagged(%u), "
+ "hw_vlan_reject_untagged(%u)",
txmode->hw_vlan_reject_tagged,
txmode->hw_vlan_reject_untagged);
ret = hns3_vlan_pvid_set(dev, txmode->pvid,
txmode->hw_vlan_insert_pvid);
if (ret)
- hns3_err(hw, "dev config vlan pvid(%d) failed, ret = %d",
+ hns3_err(hw, "dev config vlan pvid(%u) failed, ret = %d",
txmode->pvid, ret);
return ret;
uint32_t j;
if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
"invalid. valid range: 0~%d",
nb_mc_addr, HNS3_MC_MACADDR_NUM);
return -EINVAL;
for (i = 0; i < HNS3_MAX_USER_PRIO; i++) {
if (dcb_rx_conf->dcb_tc[i] != dcb_tx_conf->dcb_tc[i]) {
- hns3_err(hw, "dcb_tc[%d] = %d in rx direction, "
+ hns3_err(hw, "dcb_tc[%d] = %u in rx direction, "
"is not equal to one in tx direction.",
i, dcb_rx_conf->dcb_tc[i]);
return -EINVAL;
op_str = mmap ? "Map" : "Unmap";
status = hns3_cmd_send(hw, &desc, 1);
if (status) {
- hns3_err(hw, "%s TQP %d fail, vector_id is %d, status is %d.",
+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, status is %d.",
op_str, queue_id, req->int_vector_id, status);
return status;
}
HNS3_RING_TYPE_TX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind TX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
HNS3_RING_TYPE_RX, i);
if (ret) {
PMD_INIT_LOG(ERR, "PF fail to unbind RX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
}
ret = hns3_parse_speed(cfg.default_speed, &hw->mac.link_speed);
if (ret) {
- PMD_INIT_LOG(ERR, "Get wrong speed %d, ret = %d",
+ PMD_INIT_LOG(ERR, "Get wrong speed %u, ret = %d",
cfg.default_speed, ret);
return ret;
}
if (cmdq_resp) {
PMD_INIT_LOG(ERR,
- "cmdq execute failed for get_mac_ethertype_cmd_status, status=%d.\n",
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
cmdq_resp);
return -EIO;
}
break;
default:
PMD_INIT_LOG(ERR,
- "add mac ethertype failed for undefined, code=%d.",
+ "add mac ethertype failed for undefined, code=%u.",
resp_code);
return_status = -EIO;
break;
hns3_promisc_param_init(¶m, false, false, false, func_id);
ret = hns3_cmd_set_promisc_mode(hw, ¶m);
if (ret) {
- PMD_INIT_LOG(ERR, "failed to clear vf:%d promisc mode,"
+ PMD_INIT_LOG(ERR, "failed to clear vf:%u promisc mode,"
" ret = %d", func_id, ret);
return ret;
}
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %d rx_queues"
+ hns3_err(hw, "Failed to allocate %u rx_queues"
" intr_vec", hw->used_rx_queues);
ret = -ENOMEM;
goto alloc_intr_vec_error;
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
- hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+ hns3_warn(hw, "Close port %u finished", hw->data->port_id);
return ret;
}
return -EINVAL;
}
if (!fc_conf->pause_time) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
fc_conf->pause_time);
return -EINVAL;
}
return -EINVAL;
}
if (pfc_conf->fc.pause_time == 0) {
- hns3_err(hw, "Invalid pause time %d setting.",
+ hns3_err(hw, "Invalid pause time %u setting.",
pfc_conf->fc.pause_time);
return -EINVAL;
}
uint32_t j;
if (nb_mc_addr > HNS3_MC_MACADDR_NUM) {
- hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%d) "
+ hns3_err(hw, "failed to set mc mac addr, nb_mc_addr(%u) "
"invalid. valid range: 0~%d",
nb_mc_addr, HNS3_MC_MACADDR_NUM);
return -EINVAL;
ret = hns3_send_mbx_msg(hw, code, 0, (uint8_t *)&bind_msg,
sizeof(bind_msg), false, NULL, 0);
if (ret)
- hns3_err(hw, "%s TQP %d fail, vector_id is %d, ret is %d.",
+ hns3_err(hw, "%s TQP %u fail, vector_id is %u, ret is %d.",
op_str, queue_id, bind_msg.vector_id, ret);
return ret;
HNS3_RING_TYPE_TX, i);
if (ret) {
PMD_INIT_LOG(ERR, "VF fail to unbind TX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
HNS3_RING_TYPE_RX, i);
if (ret) {
PMD_INIT_LOG(ERR, "VF fail to unbind RX ring(%d) with "
- "vector: %d, ret=%d", i, vec, ret);
+ "vector: %u, ret=%d", i, vec, ret);
return ret;
}
}
struct hns3_hw *hw = &hns->hw;
if (nb_rx_q < hw->num_tc) {
- hns3_err(hw, "number of Rx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Rx queues(%u) is less than tcs(%u).",
nb_rx_q, hw->num_tc);
return -EINVAL;
}
if (nb_tx_q < hw->num_tc) {
- hns3_err(hw, "number of Tx queues(%d) is less than tcs(%d).",
+ hns3_err(hw, "number of Tx queues(%u) is less than tcs(%u).",
nb_tx_q, hw->num_tc);
return -EINVAL;
}
rte_free(eth_dev->process_private);
eth_dev->process_private = NULL;
hns3_mp_uninit_primary();
- hns3_warn(hw, "Close port %d finished", hw->data->port_id);
+ hns3_warn(hw, "Close port %u finished", hw->data->port_id);
return ret;
}
rte_zmalloc("intr_vec",
hw->used_rx_queues * sizeof(int), 0);
if (intr_handle->intr_vec == NULL) {
- hns3_err(hw, "Failed to allocate %d rx_queues"
+ hns3_err(hw, "Failed to allocate %u rx_queues"
" intr_vec", hw->used_rx_queues);
ret = -ENOMEM;
goto vf_alloc_intr_vec_error;
hns3_warn(hw, "Unsupported tunnel filter in 4K*200Bit");
break;
default:
- hns3_err(hw, "Unsupported flow director mode %d",
+ hns3_err(hw, "Unsupported flow director mode %u",
pf->fdir.fd_cfg.fd_mode);
return -EOPNOTSUPP;
}
key_conf->mask.ip_proto);
break;
default:
- hns3_warn(hw, "not support tuple of (%d)", tuple);
+ hns3_warn(hw, "not support tuple of (%u)", tuple);
break;
}
return true;
ret = hns3_fd_tcam_config(hw, false, rule->location, key_y, true);
if (ret) {
- hns3_err(hw, "Config fd key_y fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_y fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
ret = hns3_fd_tcam_config(hw, true, rule->location, key_x, true);
if (ret)
- hns3_err(hw, "Config fd key_x fail, loc=%d, ret=%d",
+ hns3_err(hw, "Config fd key_x fail, loc=%u, ret=%d",
rule->queue_id, ret);
return ret;
}
ret = hns3_fd_tcam_config(hw, true, rule->location, NULL,
false);
if (ret)
- hns3_err(hw, "Failed to delete fdir: %d src_ip:%x "
- "dst_ip:%x src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to delete fdir: %u src_ip:%x "
+ "dst_ip:%x src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
ret = hns3_config_key(hns, rule);
rte_spinlock_unlock(&fdir_info->flows_lock);
if (ret) {
- hns3_err(hw, "Failed to config fdir: %d src_ip:%x dst_ip:%x "
- "src_port:%d dst_port:%d ret = %d",
+ hns3_err(hw, "Failed to config fdir: %u src_ip:%x dst_ip:%x "
+ "src_port:%u dst_port:%u ret = %d",
rule->location,
rule->key_conf.spec.src_ip[IP_ADDR_KEY_ID],
rule->key_conf.spec.dst_ip[IP_ADDR_KEY_ID],
queue = (const struct rte_flow_action_queue *)action->conf;
if (queue->index >= hw->used_rx_queues) {
- hns3_err(hw, "queue ID(%d) is greater than number of "
- "available queue (%d) in driver.",
+ hns3_err(hw, "queue ID(%u) is greater than number of "
+ "available queue (%u) in driver.",
queue->index, hw->used_rx_queues);
return rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_CONF,
uint64_t end;
if (resp_len > HNS3_MBX_MAX_RESP_DATA_SIZE) {
- hns3_err(hw, "VF mbx response len(=%d) exceeds maximum(=%d)",
+ hns3_err(hw, "VF mbx response len(=%u) exceeds maximum(=%d)",
resp_len, HNS3_MBX_MAX_RESP_DATA_SIZE);
return -EINVAL;
}
if (now >= end) {
hw->mbx_resp.lost++;
hns3_err(hw,
- "VF could not get mbx(%d,%d) head(%d) tail(%d) lost(%d) from PF in_irq:%d",
+ "VF could not get mbx(%u,%u) head(%u) tail(%u) lost(%u) from PF in_irq:%d",
code0, code1, hw->mbx_resp.head, hw->mbx_resp.tail,
hw->mbx_resp.lost, in_irq);
return -ETIME;
/* first two bytes are reserved for code & subcode */
if (msg_len > (HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET)) {
hns3_err(hw,
- "VF send mbx msg fail, msg len %d exceeds max payload len %d",
+ "VF send mbx msg fail, msg len %u exceeds max payload len %d",
msg_len, HNS3_MBX_MAX_MSG_SIZE - HNS3_CMD_CODE_OFFSET);
return -EINVAL;
}
hns3_schedule_reset(HNS3_DEV_HW_TO_ADAPTER(hw));
break;
default:
- hns3_err(hw, "Fetched unsupported(%d) message from arq",
+ hns3_err(hw, "Fetched unsupported(%u) message from arq",
opcode);
break;
}
if (resp->lost)
resp->lost--;
hns3_warn(hw, "Received a mismatched response req_msg(%x) "
- "resp_msg(%x) head(%d) tail(%d) lost(%d)",
+ "resp_msg(%x) head(%u) tail(%u) lost(%u)",
resp->req_msg_data, resp_msg, resp->head, tail,
resp->lost);
} else if (tail + resp->lost > resp->head) {
resp->lost--;
hns3_warn(hw, "Received a new response again resp_msg(%x) "
- "head(%d) tail(%d) lost(%d)", resp_msg,
+ "head(%u) tail(%u) lost(%u)", resp_msg,
resp->head, tail, resp->lost);
}
rte_io_wmb();
flag = rte_le_to_cpu_16(crq->desc[crq->next_to_use].flag);
if (unlikely(!hns3_get_bit(flag, HNS3_CMDQ_RX_OUTVLD_B))) {
hns3_warn(hw,
- "dropped invalid mailbox message, code = %d",
+ "dropped invalid mailbox message, code = %u",
opcode);
/* dropping/not processing this invalid message */
break;
default:
hns3_err(hw,
- "VF received unsupported(%d) mbx msg from PF",
+ "VF received unsupported(%u) mbx msg from PF",
req->msg[0]);
break;
}
if (!rte_eth_dev_is_valid_port(param->port_id)) {
rte_errno = ENODEV;
- PMD_INIT_LOG(ERR, "port %d invalid port ID", param->port_id);
+ PMD_INIT_LOG(ERR, "port %u invalid port ID", param->port_id);
return -rte_errno;
}
dev = &rte_eth_devices[param->port_id];
for (i = 0; i < rxq->nb_rx_desc; i++) {
mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
if (unlikely(mbuf == NULL)) {
- hns3_err(hw, "Failed to allocate RXD[%d] for rx queue!",
+ hns3_err(hw, "Failed to allocate RXD[%u] for rx queue!",
i);
hns3_rx_queue_release_mbufs(rxq);
return -ENOMEM;
rxq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_rx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (rxq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u rx ring!",
q_info->idx);
return NULL;
}
rx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (rx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d rx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u rx ring!",
q_info->idx);
hns3_rx_queue_release(rxq);
return NULL;
rxq->rx_ring = (struct hns3_desc *)rx_mz->addr;
rxq->rx_ring_phys_addr = rx_mz->iova;
- hns3_dbg(hw, "No.%d rx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u rx descriptors iova 0x%" PRIx64, q_info->idx,
rxq->rx_ring_phys_addr);
return rxq;
q_info.ring_name = "rx_fake_ring";
rxq = hns3_alloc_rxq_and_dma_zone(dev, &q_info);
if (rxq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake rx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake rx ring.", idx);
return -ENOMEM;
}
txq = rte_zmalloc_socket(q_info->type, sizeof(struct hns3_tx_queue),
RTE_CACHE_LINE_SIZE, q_info->socket_id);
if (txq == NULL) {
- hns3_err(hw, "Failed to allocate memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to allocate memory for No.%u tx ring!",
q_info->idx);
return NULL;
}
tx_desc, HNS3_RING_BASE_ALIGN,
q_info->socket_id);
if (tx_mz == NULL) {
- hns3_err(hw, "Failed to reserve DMA memory for No.%d tx ring!",
+ hns3_err(hw, "Failed to reserve DMA memory for No.%u tx ring!",
q_info->idx);
hns3_tx_queue_release(txq);
return NULL;
txq->tx_ring = (struct hns3_desc *)tx_mz->addr;
txq->tx_ring_phys_addr = tx_mz->iova;
- hns3_dbg(hw, "No.%d tx descriptors iova 0x%" PRIx64, q_info->idx,
+ hns3_dbg(hw, "No.%u tx descriptors iova 0x%" PRIx64, q_info->idx,
txq->tx_ring_phys_addr);
/* Clear tx bd */
q_info.ring_name = "tx_fake_ring";
txq = hns3_alloc_txq_and_dma_zone(dev, &q_info);
if (txq == NULL) {
- hns3_err(hw, "Failed to setup No.%d fake tx ring.", idx);
+ hns3_err(hw, "Failed to setup No.%u fake tx ring.", idx);
return -ENOMEM;
}
if (rs_thresh + free_thresh > nb_desc || nb_desc % rs_thresh ||
rs_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP ||
free_thresh >= nb_desc - HNS3_TX_RS_FREE_THRESH_GAP) {
- hns3_err(hw, "tx_rs_thresh (%d) tx_free_thresh (%d) nb_desc "
- "(%d) of tx descriptors for port=%d queue=%d check "
+ hns3_err(hw, "tx_rs_thresh (%u) tx_free_thresh (%u) nb_desc "
+ "(%u) of tx descriptors for port=%u queue=%u check "
"fail!",
rs_thresh, free_thresh, nb_desc, hw->data->port_id,
idx);
desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Failed to query RX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to query RX No.%u queue stat: %d",
i, ret);
return ret;
}
desc.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc, 1);
if (ret) {
- hns3_err(hw, "Failed to query TX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to query TX No.%u queue stat: %d",
i, ret);
return ret;
}
desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
- hns3_err(hw, "Failed to reset RX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to reset RX No.%u queue stat: %d",
i, ret);
return ret;
}
desc_reset.data[0] = rte_cpu_to_le_32((uint32_t)i);
ret = hns3_cmd_send(hw, &desc_reset, 1);
if (ret) {
- hns3_err(hw, "Failed to reset TX No.%d queue stat: %d",
+ hns3_err(hw, "Failed to reset TX No.%u queue stat: %d",
i, ret);
return ret;
}
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
+ hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
"should < %u", i, ids[i], cnt_stats);
rte_free(values_copy);
return -EINVAL;
for (i = 0; i < size; i++) {
if (ids[i] >= cnt_stats) {
- hns3_err(hw, "ids[%d] (%" PRIx64 ") is invalid, "
+ hns3_err(hw, "ids[%u] (%" PRIx64 ") is invalid, "
"should < %u", i, ids[i], cnt_stats);
rte_free(names_copy);
return -EINVAL;