hw->hw_addr = (uint8_t *)(pci_dev->mem_resource[0].addr);
if (!hw->hw_addr) {
PMD_INIT_LOG(ERR, "Hardware is not available, "
- "as address is NULL\n");
+ "as address is NULL\n");
return -ENODEV;
}
PMD_INIT_LOG(ERR, "Failed to init adminq: %d", ret);
return -EIO;
}
- PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM "
- "%02d.%02d.%02d eetrack %04x\n",
- hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
- hw->aq.api_maj_ver, hw->aq.api_min_ver,
- ((hw->nvm.version >> 12) & 0xf),
- ((hw->nvm.version >> 4) & 0xff),
- (hw->nvm.version & 0xf), hw->nvm.eetrack);
+ PMD_INIT_LOG(INFO, "FW %d.%d API %d.%d NVM %02d.%02d.%02d eetrack %04x\n",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ ((hw->nvm.version >> 12) & 0xf),
+ ((hw->nvm.version >> 4) & 0xff),
+ (hw->nvm.version & 0xf), hw->nvm.eetrack);
/* Disable LLDP */
ret = i40e_aq_stop_lldp(hw, true, NULL);
if ((dev->data->dev_conf.link_duplex != ETH_LINK_AUTONEG_DUPLEX) &&
(dev->data->dev_conf.link_duplex != ETH_LINK_FULL_DUPLEX)) {
PMD_INIT_LOG(ERR, "Invalid link_duplex (%hu) for port %hhu\n",
- dev->data->dev_conf.link_duplex,
- dev->data->port_id);
+ dev->data->dev_conf.link_duplex,
+ dev->data->port_id);
return -EINVAL;
}
pf->vf_nb_qps = RTE_LIBRTE_I40E_QUEUE_NUM_PER_VF;
if (dev->pci_dev->max_vfs > hw->func_caps.num_vfs) {
PMD_INIT_LOG(ERR, "Config VF number %u, "
- "max supported %u.\n", dev->pci_dev->max_vfs,
- hw->func_caps.num_vfs);
+ "max supported %u.\n",
+ dev->pci_dev->max_vfs,
+ hw->func_caps.num_vfs);
return -EINVAL;
}
if (pf->vf_nb_qps > I40E_MAX_QP_NUM_PER_VF) {
PMD_INIT_LOG(ERR, "FVL VF queue %u, "
- "max support %u queues.\n", pf->vf_nb_qps,
- I40E_MAX_QP_NUM_PER_VF);
+ "max support %u queues.\n",
+ pf->vf_nb_qps, I40E_MAX_QP_NUM_PER_VF);
return -EINVAL;
}
pf->vf_num = dev->pci_dev->max_vfs;
sum_queues += pf->vf_nb_qps * pf->vf_num;
sum_vsis += pf->vf_num;
PMD_INIT_LOG(INFO, "Max VF num:%u each has queue pairs:%u\n",
- pf->vf_num, pf->vf_nb_qps);
+ pf->vf_num, pf->vf_nb_qps);
} else
pf->vf_num = 0;
sum_queues > hw->func_caps.num_rx_qp) {
PMD_INIT_LOG(ERR, "VSI/QUEUE setting can't be satisfied\n");
PMD_INIT_LOG(ERR, "Max VSIs: %u, asked:%u\n",
- pf->max_num_vsi, sum_vsis);
+ pf->max_num_vsi, sum_vsis);
PMD_INIT_LOG(ERR, "Total queue pairs:%u, asked:%u\n",
- hw->func_caps.num_rx_qp, sum_queues);
+ hw->func_caps.num_rx_qp, sum_queues);
return -EINVAL;
}
/* Each VSI occupies at least 1 MSIX interrupt, plus IRQ0 for misc intr cause */
if (sum_vsis > hw->func_caps.num_msix_vectors - 1) {
- PMD_INIT_LOG(ERR, "Too many VSIs(%u), MSIX intr(%u) not enough\n",
- sum_vsis, hw->func_caps.num_msix_vectors);
+ PMD_INIT_LOG(ERR, "Too many VSIs(%u), "
+ "MSIX intr(%u) not enough\n",
+ sum_vsis, hw->func_caps.num_msix_vectors);
return -EINVAL;
}
return I40E_SUCCESS;
entry = rte_zmalloc("i40e", sizeof(*entry), 0);
if (entry == NULL) {
- PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool\n");
+ PMD_DRV_LOG(ERR, "Failed to allocate memory for resource pool\n");
return -ENOMEM;
}
if (pool->num_free < num) {
PMD_DRV_LOG(ERR, "No resource. ask:%u, available:%u\n",
- num, pool->num_free);
+ num, pool->num_free);
return -ENOMEM;
}
entry = rte_zmalloc("res_pool", sizeof(*entry), 0);
if (entry == NULL) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "resource pool\n");
+ "resource pool\n");
return -ENOMEM;
}
entry->base = valid_entry->base;
/* If DCB is not supported, only default TC is supported */
if (!hw->func_caps.dcb && enabled_tcmap != I40E_DEFAULT_TCMAP) {
- PMD_DRV_LOG(ERR, "DCB is not enabled, "
- "only TC0 is supported\n");
+ PMD_DRV_LOG(ERR, "DCB is not enabled, only TC0 is supported\n");
return -EINVAL;
}
if (!bitmap_is_subset(hw->func_caps.enabled_tcmap, enabled_tcmap)) {
PMD_DRV_LOG(ERR, "Enabled TC map 0x%x not applicable to "
- "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
- enabled_tcmap);
+ "HW support 0x%x\n", hw->func_caps.enabled_tcmap,
+ enabled_tcmap);
return -EINVAL;
}
return I40E_SUCCESS;
if (NULL == pf || vsi == NULL) {
PMD_DRV_LOG(ERR, "veb setup failed, "
- "associated VSI shouldn't null\n");
+ "associated VSI shouldn't null\n");
return NULL;
}
hw = I40E_PF_TO_HW(pf);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d\n",
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
goto fail;
}
&veb->stats_idx, NULL, NULL, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Get veb statics index failed, aq_err: %d\n",
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
goto fail;
}
struct i40e_mac_filter *f;
PMD_DRV_LOG(WARNING, "Cannot remove the default "
- "macvlan filter\n");
+ "macvlan filter\n");
/* It needs to add the permanent mac into mac list */
f = rte_zmalloc("macv_filter", sizeof(*f), 0);
if (f == NULL) {
memset(&bw_config, 0, sizeof(bw_config));
ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
if (ret != I40E_SUCCESS) {
- PMD_DRV_LOG(ERR, "VSI failed to get bandwidth "
- "configuration %u\n", hw->aq.asq_last_status);
+ PMD_DRV_LOG(ERR, "VSI failed to get bandwidth configuration %u\n",
+ hw->aq.asq_last_status);
return ret;
}
&ets_sla_config, NULL);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "VSI failed to get TC bandwdith "
- "configuration %u\n", hw->aq.asq_last_status);
+ "configuration %u\n", hw->aq.asq_last_status);
return ret;
}
PMD_DRV_LOG(INFO, "VSI max_bw:%u\n", bw_config.max_bw);
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
PMD_DRV_LOG(INFO, "\tVSI TC%u:share credits %u\n", i,
- ets_sla_config.share_credits[i]);
+ ets_sla_config.share_credits[i]);
PMD_DRV_LOG(INFO, "\tVSI TC%u:credits %u\n", i,
- rte_le_to_cpu_16(ets_sla_config.credits[i]));
+ rte_le_to_cpu_16(ets_sla_config.credits[i]));
PMD_DRV_LOG(INFO, "\tVSI TC%u: max credits: %u", i,
- rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
- (i * 4));
+ rte_le_to_cpu_16(ets_sla_config.credits[i / 4]) >>
+ (i * 4));
}
return 0;
if (type != I40E_VSI_MAIN && uplink_vsi == NULL) {
PMD_DRV_LOG(ERR, "VSI setup failed, "
- "VSI link shouldn't be NULL\n");
+ "VSI link shouldn't be NULL\n");
return NULL;
}
if (type == I40E_VSI_MAIN && uplink_vsi != NULL) {
PMD_DRV_LOG(ERR, "VSI setup failed, MAIN VSI "
- "uplink VSI should be NULL\n");
+ "uplink VSI should be NULL\n");
return NULL;
}
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping\n");
+ "TC queue mapping\n");
goto fail_msix_alloc;
}
ctxt.seid = vsi->seid;
I40E_DEFAULT_TCMAP);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to configure "
- "TC queue mapping\n");
+ "TC queue mapping\n");
goto fail_msix_alloc;
}
ctxt.info.up_enable_bits = I40E_DEFAULT_TCMAP;
ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
if (ret) {
PMD_DRV_LOG(ERR, "add vsi failed, aq_err=%d\n",
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
goto fail_msix_alloc;
}
memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
if (ret)
PMD_DRV_LOG(INFO, "Update VSI failed to %s vlan stripping\n",
- on ? "enable" : "disable");
+ on ? "enable" : "disable");
return ret;
}
/* Check if it is timeout */
if (j >= I40E_CHK_Q_ENA_COUNT) {
PMD_DRV_LOG(ERR, "Failed to %s tx queue[%u]\n",
- (on ? "enable" : "disable"), q_idx);
+ (on ? "enable" : "disable"), q_idx);
return I40E_ERR_TIMEOUT;
}
/* Check if it is timeout */
if (j >= I40E_CHK_Q_ENA_COUNT) {
PMD_DRV_LOG(ERR, "Failed to %s rx queue[%u]\n",
- (on ? "enable" : "disable"), q_idx);
+ (on ? "enable" : "disable"), q_idx);
return I40E_ERR_TIMEOUT;
}
ret = i40e_rx_queue_init(data->rx_queues[i]);
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Failed to do RX queue "
- "initialization\n");
+ "initialization\n");
break;
}
}
if (ret != I40E_SUCCESS) {
PMD_DRV_LOG(INFO, "Failed to read msg from AdminQ, "
- "aq_err: %u\n", hw->aq.asq_last_status);
+ "aq_err: %u\n", hw->aq.asq_last_status);
break;
}
opcode = rte_le_to_cpu_16(info.desc.opcode);
break;
default:
PMD_DRV_LOG(ERR, "Request %u is not supported yet\n",
- opcode);
+ opcode);
break;
}
/* Reset the buffer after processing one */
/* Shared IRQ case, return */
if (!(cause & I40E_PFINT_ICR0_INTEVENT_MASK)) {
PMD_DRV_LOG(INFO, "Port%d INT0:share IRQ case, "
- "no INT event to process\n", hw->pf_id);
+ "no INT event to process\n", hw->pf_id);
goto done;
}
if (vsi->vfta[j] & (1 << k)) {
if (i > num - 1) {
PMD_DRV_LOG(ERR, "vlan number "
- "not match\n");
+ "not match\n");
return I40E_ERR_PARAM;
}
(void)rte_memcpy(&mv_f[i].macaddr,
vpe->event_data.link_event.link_status;
vf->pend_msg |= PFMSG_LINK_CHANGE;
PMD_DRV_LOG(INFO, "Link status update:%s\n",
- vf->link_up ? "up" : "down");
+ vf->link_up ? "up" : "down");
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
vf->vf_reset = true;
PMD_DRV_LOG(INFO, "PF driver closed\n");
break;
default:
- PMD_DRV_LOG(ERR,
- "%s: Unknown event %d from pf\n",
- __func__, vpe->event);
+ PMD_DRV_LOG(ERR, "%s: Unknown event %d from pf\n",
+ __func__, vpe->event);
}
} else {
/* async reply msg on command issued by vf previously */
PMD_DRV_LOG(ERR, "Failed to read message from AdminQ\n");
else if (args->ops != info.ops)
PMD_DRV_LOG(ERR, "command mismatch, expect %u, get %u\n",
- args->ops, info.ops);
+ args->ops, info.ops);
return (err | info.result);
}
else if ((pver->major != version.major) ||
(pver->minor != version.minor)) {
PMD_INIT_LOG(ERR, "pf/vf API version mismatch. "
- "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
- version.major, version.minor);
+ "(%u.%u)-(%u.%u)\n", pver->major, pver->minor,
+ version.major, version.minor);
return -1;
}
err = i40evf_execute_vf_cmd(dev, &args);
if (err) {
- PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_GET_VF_RESOURCE\n");
+ PMD_DRV_LOG(ERR, "fail to execute command OP_GET_VF_RESOURCE\n");
return err;
}
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
- "CONFIG_PROMISCUOUS_MODE\n");
+ "CONFIG_PROMISCUOUS_MODE\n");
return err;
}
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_CONFIG_VSI_QUEUES\n");
+ "OP_CONFIG_VSI_QUEUES\n");
rte_free(queue_info);
return err;
args.out_size = I40E_AQ_BUF_SZ;
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
- PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n", isrx ? "RX" : "TX",
- qid, on ? "on" : "off");
+ PMD_DRV_LOG(ERR, "fail to switch %s %u %s\n",
+ isrx ? "RX" : "TX", qid, on ? "on" : "off");
return err;
}
if (rxq->start_rx_per_q)
continue;
if (i40evf_dev_rx_queue_start(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
- i);
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
return -1;
}
}
if (txq->start_tx_per_q)
continue;
if (i40evf_dev_tx_queue_start(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
- i);
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
return -1;
}
}
/* Stop TX queues first */
for (i = 0; i < dev->data->nb_tx_queues; i++) {
if (i40evf_dev_tx_queue_stop(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
- i);
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
return -1;
}
}
/* Then stop RX queues */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i40evf_dev_rx_queue_stop(dev, i) != 0) {
- PMD_DRV_LOG(ERR, "Fail to start queue %u\n",
- i);
+ PMD_DRV_LOG(ERR, "Fail to start queue %u\n", i);
return -1;
}
}
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x:%x:%x:%x:%x:%x\n",
- addr->addr_bytes[0], addr->addr_bytes[1],
- addr->addr_bytes[2], addr->addr_bytes[3],
- addr->addr_bytes[4], addr->addr_bytes[5]);
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
return -1;
}
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_ADD_ETHER_ADDRESS\n");
+ "OP_ADD_ETHER_ADDRESS\n");
return err;
}
if (i40e_validate_mac_addr(addr->addr_bytes) != I40E_SUCCESS) {
PMD_DRV_LOG(ERR, "Invalid mac:%x-%x-%x-%x-%x-%x\n",
- addr->addr_bytes[0], addr->addr_bytes[1],
- addr->addr_bytes[2], addr->addr_bytes[3],
- addr->addr_bytes[4], addr->addr_bytes[5]);
+ addr->addr_bytes[0], addr->addr_bytes[1],
+ addr->addr_bytes[2], addr->addr_bytes[3],
+ addr->addr_bytes[4], addr->addr_bytes[5]);
return -1;
}
err = i40evf_execute_vf_cmd(dev, &args);
if (err)
PMD_DRV_LOG(ERR, "fail to execute command "
- "OP_DEL_ETHER_ADDRESS\n");
+ "OP_DEL_ETHER_ADDRESS\n");
return err;
}
if (err)
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
- rx_queue_id);
+ rx_queue_id);
}
return err;
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
- rx_queue_id);
+ rx_queue_id);
return err;
}
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
- tx_queue_id);
+ tx_queue_id);
}
return err;
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
- tx_queue_id);
+ tx_queue_id);
return err;
}
if (vf->max_pkt_len <= ETHER_MAX_LEN ||
vf->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled\n",
- (uint32_t)ETHER_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled\n",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (vf->max_pkt_len < ETHER_MIN_LEN ||
vf->max_pkt_len > ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled\n",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled\n",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}
case I40E_VIRTCHNL_OP_FCOE:
PMD_DRV_LOG(ERR, "OP_FCOE received, not supported\n");
default:
- PMD_DRV_LOG(ERR, "%u received, not supported\n",
- opcode);
+ PMD_DRV_LOG(ERR, "%u received, not supported\n", opcode);
i40e_pf_host_send_msg_to_vf(vf, opcode,
I40E_ERR_PARAM, NULL, 0);
break;
uint16_t i, j;
PMD_RX_LOG(DEBUG, "Rx mbuf alloc failed for "
- "port_id=%u, queue_id=%u\n",
- rxq->port_id, rxq->queue_id);
+ "port_id=%u, queue_id=%u\n",
+ rxq->port_id, rxq->queue_id);
rxq->rx_nb_avail = 0;
rxq->rx_tail = (uint16_t)(rxq->rx_tail - nb_rx);
for (i = 0, j = rxq->rx_tail; i < nb_rx; i++, j++)
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on\n",
- rx_queue_id);
+ rx_queue_id);
i40e_rx_queue_release_mbufs(rxq);
i40e_reset_rx_queue(rxq);
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off\n",
- rx_queue_id);
+ rx_queue_id);
return err;
}
i40e_rx_queue_release_mbufs(rxq);
err = i40e_switch_tx_queue(hw, tx_queue_id + q_base, TRUE);
if (err)
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on\n",
- tx_queue_id);
+ tx_queue_id);
}
return err;
if (err) {
PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of\n",
- tx_queue_id);
+ tx_queue_id);
return err;
}
if (!vsi || queue_idx >= vsi->nb_qps) {
PMD_DRV_LOG(ERR, "VSI not available or queue "
- "index exceeds the maximum\n");
+ "index exceeds the maximum\n");
return I40E_ERR_PARAM;
}
if (((nb_desc * sizeof(union i40e_rx_desc)) % I40E_ALIGN) != 0 ||
(nb_desc > I40E_MAX_RING_DESC) ||
(nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of receive descriptors is "
- "invalid\n", nb_desc);
+ "invalid\n", nb_desc);
return I40E_ERR_PARAM;
}
socket_id);
if (!rxq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "rx queue data structure\n");
+ "rx queue data structure\n");
return (-ENOMEM);
}
rxq->mp = mp;
if (!use_def_burst_func && !dev->data->scattered_rx) {
#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "satisfied. Rx Burst Bulk Alloc function will be "
- "used on port=%d, queue=%d.\n",
- rxq->port_id, rxq->queue_id);
+ "satisfied. Rx Burst Bulk Alloc function will be "
+ "used on port=%d, queue=%d.\n",
+ rxq->port_id, rxq->queue_id);
dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
} else {
PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
- "not satisfied, Scattered Rx is requested, "
- "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
- "not enabled on port=%d, queue=%d.\n",
- rxq->port_id, rxq->queue_id);
+ "not satisfied, Scattered Rx is requested, "
+ "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+ "not enabled on port=%d, queue=%d.\n",
+ rxq->port_id, rxq->queue_id);
}
return 0;
if (!vsi || queue_idx >= vsi->nb_qps) {
PMD_DRV_LOG(ERR, "VSI is NULL, or queue index (%u) "
- "exceeds the maximum\n", queue_idx);
+ "exceeds the maximum\n", queue_idx);
return I40E_ERR_PARAM;
}
(nb_desc > I40E_MAX_RING_DESC) ||
(nb_desc < I40E_MIN_RING_DESC)) {
PMD_DRV_LOG(ERR, "Number (%u) of transmit descriptors is "
- "invalid\n", nb_desc);
+ "invalid\n", nb_desc);
return I40E_ERR_PARAM;
}
socket_id);
if (!txq) {
PMD_DRV_LOG(ERR, "Failed to allocate memory for "
- "tx queue structure\n");
+ "tx queue structure\n");
return (-ENOMEM);
}
if (rxq->max_pkt_len <= ETHER_MAX_LEN ||
rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) {
PMD_DRV_LOG(ERR, "maximum packet length must "
- "be larger than %u and smaller than %u,"
- "as jumbo frame is enabled\n",
- (uint32_t)ETHER_MAX_LEN,
- (uint32_t)I40E_FRAME_SIZE_MAX);
+ "be larger than %u and smaller than %u,"
+ "as jumbo frame is enabled\n",
+ (uint32_t)ETHER_MAX_LEN,
+ (uint32_t)I40E_FRAME_SIZE_MAX);
return I40E_ERR_CONFIG;
}
} else {
if (rxq->max_pkt_len < ETHER_MIN_LEN ||
rxq->max_pkt_len > ETHER_MAX_LEN) {
PMD_DRV_LOG(ERR, "maximum packet length must be "
- "larger than %u and smaller than %u, "
- "as jumbo frame is disabled\n",
- (uint32_t)ETHER_MIN_LEN,
- (uint32_t)ETHER_MAX_LEN);
+ "larger than %u and smaller than %u, "
+ "as jumbo frame is disabled\n",
+ (uint32_t)ETHER_MIN_LEN,
+ (uint32_t)ETHER_MAX_LEN);
return I40E_ERR_CONFIG;
}
}