}
new_link.link_status = cxgbe_force_linkup(adapter) ?
- ETH_LINK_UP : pi->link_cfg.link_ok;
+ RTE_ETH_LINK_UP : pi->link_cfg.link_ok;
new_link.link_autoneg = (lc->link_caps & FW_PORT_CAP32_ANEG) ? 1 : 0;
- new_link.link_duplex = ETH_LINK_FULL_DUPLEX;
+ new_link.link_duplex = RTE_ETH_LINK_FULL_DUPLEX;
new_link.link_speed = t4_fwcap_to_speed(lc->link_caps);
return rte_eth_linkstatus_set(eth_dev, &new_link);
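
This hunk is part of DPDK 21.11's ethdev namespace cleanup: ETH_LINK_UP and ETH_LINK_FULL_DUPLEX simply become RTE_ETH_LINK_UP and RTE_ETH_LINK_FULL_DUPLEX, and the populated rte_eth_link is handed to rte_eth_linkstatus_set() exactly as before. A minimal consumer-side sketch with the renamed constants ("port_id" is illustrative, not from the patch):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: poll the link with the renamed constants; assumes a started
 * port. */
static void
print_link(uint16_t port_id)
{
        struct rte_eth_link link;

        if (rte_eth_link_get_nowait(port_id, &link) != 0)
                return;

        printf("port %u: %s, %u Mbps, %s-duplex\n", port_id,
               link.link_status == RTE_ETH_LINK_UP ? "up" : "down",
               link.link_speed,
               link.link_duplex == RTE_ETH_LINK_FULL_DUPLEX ?
               "full" : "half");
}
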
{
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- struct rte_eth_dev_info dev_info;
- int err;
uint16_t new_mtu = mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
- err = cxgbe_dev_info_get(eth_dev, &dev_info);
- if (err != 0)
- return err;
-
- /* Must accommodate at least RTE_ETHER_MIN_MTU */
- if (new_mtu < RTE_ETHER_MIN_MTU || new_mtu > dev_info.max_rx_pktlen)
- return -EINVAL;
-
- /* set to jumbo mode if needed */
- if (new_mtu > CXGBE_ETH_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
- err = t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
-                     -1, -1, true);
+ return t4_set_rxmode(adapter, adapter->mbox, pi->viid, new_mtu, -1, -1,
+                      -1, -1, true);
- if (!err)
- eth_dev->data->dev_conf.rxmode.max_rx_pkt_len = new_mtu;
-
- return err;
}
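
The deleted validation is not lost: with rxmode.max_rx_pkt_len and the jumbo-frame offload removed, the ethdev layer itself range-checks the requested MTU against dev_info.min_mtu/max_mtu inside rte_eth_dev_set_mtu() and records the accepted value in dev->data->mtu, so the driver callback shrinks to the single t4_set_rxmode() call that programs the hardware. A hedged application-side sketch (the 9000-byte value is an arbitrary example):

#include <rte_ethdev.h>

/* Sketch: with the jumbo offload gone, a large frame size is requested
 * purely through the MTU API; ethdev validates the value against
 * dev_info.min_mtu/max_mtu and stores it in dev->data->mtu on success. */
static int
enable_jumbo(uint16_t port_id)
{
        return rte_eth_dev_set_mtu(port_id, 9000);
}
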
/*
goto out;
}
- if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
+ if (rx_conf->offloads & RTE_ETH_RX_OFFLOAD_SCATTER)
eth_dev->data->scattered_rx = 1;
else
eth_dev->data->scattered_rx = 0;
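
Another mechanical rename, DEV_RX_OFFLOAD_SCATTER to RTE_ETH_RX_OFFLOAD_SCATTER; mirroring the offload bit into data->scattered_rx is unchanged. A sketch of requesting the offload at the port level, which ethdev folds into rx_conf->offloads at queue setup time (queue counts and "port_id" are illustrative):

#include <rte_ethdev.h>

/* Sketch: request scattered Rx with the renamed flag; the PMD then
 * mirrors the bit into data->scattered_rx as in the hunk above. */
static int
configure_scatter(uint16_t port_id)
{
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.offloads |= RTE_ETH_RX_OFFLOAD_SCATTER;
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}
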
CXGBE_FUNC_TRACE();
- if (eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)
+ if (eth_dev->data->dev_conf.rxmode.mq_mode & RTE_ETH_MQ_RX_RSS_FLAG)
eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_RSS_HASH;
+ RTE_ETH_RX_OFFLOAD_RSS_HASH;
if (!(adapter->flags & FW_QUEUE_BOUND)) {
err = cxgbe_setup_sge_fwevtq(adapter);
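
Same pattern for multi-queue RSS: when the application asks for RSS mode, the driver force-enables the renamed RTE_ETH_RX_OFFLOAD_RSS_HASH offload so the computed hash is delivered in the mbuf. A sketch of the application side, with an illustrative queue count and RTE_ETH_RSS_IP as an example hash selection:

#include <rte_ethdev.h>

/* Sketch: application half of the hunk above; the PMD turns on
 * RTE_ETH_RX_OFFLOAD_RSS_HASH by itself once RSS mode is requested. */
static int
configure_rss(uint16_t port_id, uint16_t nb_rxq)
{
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
        conf.rx_adv_conf.rss_conf.rss_hf = RTE_ETH_RSS_IP;
        return rte_eth_dev_configure(port_id, nb_rxq, nb_rxq, &conf);
}
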
/* Free up the existing queue */
if (eth_dev->data->tx_queues[queue_idx]) {
- cxgbe_dev_tx_queue_release(eth_dev->data->tx_queues[queue_idx]);
+ cxgbe_dev_tx_queue_release(eth_dev, queue_idx);
eth_dev->data->tx_queues[queue_idx] = NULL;
}
return err;
}
-void cxgbe_dev_tx_queue_release(void *q)
+void cxgbe_dev_tx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct sge_eth_txq *txq = (struct sge_eth_txq *)q;
+ struct sge_eth_txq *txq = eth_dev->data->tx_queues[qid];
if (txq) {
struct port_info *pi = (struct port_info *)
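
These hunks track the 21.11 change of the queue-release callback from void (*)(void *) to void (*)(struct rte_eth_dev *, uint16_t): ethdev now passes the device and queue index, and the driver dereferences data->tx_queues[qid] itself instead of receiving the raw queue pointer; the Rx release further down is converted identically. A reduced sketch of the new callback shape, with the hypothetical "my_txq" standing in for a PMD's own queue structure (not the cxgbe internals):

#include <stdlib.h>
#include <rte_ethdev.h>

/* Sketch of the post-21.11 callback shape; "my_txq" is hypothetical. */
struct my_txq { void *hw_ring; };

static void
my_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
        /* ethdev now passes the device + index; look the queue up. */
        struct my_txq *txq = dev->data->tx_queues[qid];

        if (txq == NULL)
                return;
        free(txq->hw_ring);     /* stand-in for real HW teardown */
        free(txq);
        dev->data->tx_queues[qid] = NULL;
}
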
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
- unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ unsigned int pkt_len = eth_dev->data->mtu + RTE_ETHER_HDR_LEN +
+ RTE_ETHER_CRC_LEN;
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct rte_eth_dev_info dev_info;
/* Free up the existing queue */
if (eth_dev->data->rx_queues[queue_idx]) {
- cxgbe_dev_rx_queue_release(eth_dev->data->rx_queues[queue_idx]);
+ cxgbe_dev_rx_queue_release(eth_dev, queue_idx);
eth_dev->data->rx_queues[queue_idx] = NULL;
}
rxq->rspq.size = temp_nb_desc;
rxq->fl.size = temp_nb_desc;
- /* Set to jumbo mode if necessary */
- if (pkt_len > CXGBE_ETH_MAX_LEN)
- eth_dev->data->dev_conf.rxmode.offloads |=
- DEV_RX_OFFLOAD_JUMBO_FRAME;
- else
- eth_dev->data->dev_conf.rxmode.offloads &=
- ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-
err = t4_sge_alloc_rxq(adapter, &rxq->rspq, false, eth_dev, msi_idx,
&rxq->fl, NULL,
is_pf4(adapter) ?
return err;
}
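
The Rx queue setup hunks are the queue-level half of the max_rx_pkt_len removal: the target frame length is now derived from eth_dev->data->mtu plus the Ethernet header and CRC, and the jumbo-mode toggling disappears because frame size simply follows the MTU. For the default MTU the arithmetic works out as in this small sketch:

#include <rte_ether.h>

/* Sketch: for the default MTU of 1500 the hunk computes
 * 1500 + RTE_ETHER_HDR_LEN (14) + RTE_ETHER_CRC_LEN (4) = 1518 bytes,
 * i.e. RTE_ETHER_MAX_LEN; larger MTUs grow the frame directly, with no
 * separate jumbo switch to flip. */
static unsigned int
frame_len_for_mtu(uint16_t mtu)
{
        return (unsigned int)mtu + RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN;
}
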
-void cxgbe_dev_rx_queue_release(void *q)
+void cxgbe_dev_rx_queue_release(struct rte_eth_dev *eth_dev, uint16_t qid)
{
- struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
+ struct sge_eth_rxq *rxq = eth_dev->data->rx_queues[qid];
if (rxq) {
struct port_info *pi = (struct port_info *)
rx_pause = 1;
if (rx_pause && tx_pause)
- fc_conf->mode = RTE_FC_FULL;
+ fc_conf->mode = RTE_ETH_FC_FULL;
else if (rx_pause)
- fc_conf->mode = RTE_FC_RX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_RX_PAUSE;
else if (tx_pause)
- fc_conf->mode = RTE_FC_TX_PAUSE;
+ fc_conf->mode = RTE_ETH_FC_TX_PAUSE;
else
- fc_conf->mode = RTE_FC_NONE;
+ fc_conf->mode = RTE_ETH_FC_NONE;
return 0;
}
u8 tx_pause = 0, rx_pause = 0;
int ret;
- if (fc_conf->mode == RTE_FC_FULL) {
+ if (fc_conf->mode == RTE_ETH_FC_FULL) {
tx_pause = 1;
rx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_TX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_TX_PAUSE) {
tx_pause = 1;
- } else if (fc_conf->mode == RTE_FC_RX_PAUSE) {
+ } else if (fc_conf->mode == RTE_ETH_FC_RX_PAUSE) {
rx_pause = 1;
}
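
Both flow-control hunks are RTE_ETH_FC_* renames around unchanged pause-frame logic: the get path composes the mode from the rx/tx pause bits, and the set path decomposes it again. A hedged read-modify-write sketch using the public API ("port_id" illustrative):

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: read-modify-write of the pause mode with the renamed enum. */
static int
force_full_fc(uint16_t port_id)
{
        struct rte_eth_fc_conf fc;
        int ret;

        memset(&fc, 0, sizeof(fc));
        ret = rte_eth_dev_flow_ctrl_get(port_id, &fc);
        if (ret != 0)
                return ret;

        if (fc.mode != RTE_ETH_FC_FULL) {
                fc.mode = RTE_ETH_FC_FULL;      /* pause both directions */
                ret = rte_eth_dev_flow_ctrl_set(port_id, &fc);
        }
        return ret;
}
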
rss_hf |= CXGBE_RSS_HF_IPV6_MASK;
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) {
- rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_TCP;
if (flags & F_FW_RSS_VI_CONFIG_CMD_UDPEN)
- rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
+ rss_hf |= RTE_ETH_RSS_NONFRAG_IPV4_UDP;
}
if (flags & F_FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
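
The firmware-flag decoding keeps its meaning under the new names: IPv4 four-tuple hashing reports RTE_ETH_RSS_NONFRAG_IPV4_TCP, plus the ..._UDP bit when UDP hashing is also enabled. A sketch of checking the reported set from an application (key retrieval is skipped by passing a NULL rss_key):

#include <stdio.h>
#include <rte_ethdev.h>

/* Sketch: query the active hash functions and test the renamed bits. */
static void
check_rss_hf(uint16_t port_id)
{
        struct rte_eth_rss_conf conf = { .rss_key = NULL };

        if (rte_eth_dev_rss_hash_conf_get(port_id, &conf) != 0)
                return;

        if (conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_TCP)
                printf("IPv4/TCP four-tuple hashing on\n");
        if (conf.rss_hf & RTE_ETH_RSS_NONFRAG_IPV4_UDP)
                printf("IPv4/UDP four-tuple hashing on\n");
}
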
rte_memcpy(rss, pi->rss, pi->rss_size * sizeof(u16));
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
return -EINVAL;
for (i = 0; i < reta_size; i++) {
- idx = i / RTE_RETA_GROUP_SIZE;
- shift = i % RTE_RETA_GROUP_SIZE;
+ idx = i / RTE_ETH_RETA_GROUP_SIZE;
+ shift = i % RTE_ETH_RETA_GROUP_SIZE;
if (!(reta_conf[idx].mask & (1ULL << shift)))
continue;
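
Both RETA loops switch to RTE_ETH_RETA_GROUP_SIZE (still 64 entries per group): table entry i lives in group i / 64 at bit i % 64 of that group's mask, for query and update alike. A sketch that builds the masks for a full-table query, assuming a reta_size of at most 512 (dev_info.reta_size is the real source):

#include <string.h>
#include <rte_ethdev.h>

/* Sketch: set the group-mask bit for every entry, then query the whole
 * redirection table. Entry i sits in group i / 64 at bit i % 64. */
static int
dump_reta(uint16_t port_id, uint16_t reta_size)
{
        struct rte_eth_rss_reta_entry64
                reta[RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE];
        uint16_t i;

        memset(reta, 0, sizeof(reta));
        for (i = 0; i < reta_size; i++)
                reta[i / RTE_ETH_RETA_GROUP_SIZE].mask |=
                        1ULL << (i % RTE_ETH_RETA_GROUP_SIZE);

        return rte_eth_dev_rss_reta_query(port_id, reta, reta_size);
}
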
if (lc->pcaps & FW_PORT_CAP32_SPEED_100G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_100G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_100G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
}
if (lc->pcaps & FW_PORT_CAP32_SPEED_50G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_50G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_50G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER);
}
if (lc->pcaps & FW_PORT_CAP32_SPEED_25G) {
if (capa_arr) {
- capa_arr[num].speed = ETH_SPEED_NUM_25G;
+ capa_arr[num].speed = RTE_ETH_SPEED_NUM_25G;
capa_arr[num].capa = RTE_ETH_FEC_MODE_CAPA_MASK(NOFEC) |
RTE_ETH_FEC_MODE_CAPA_MASK(BASER) |
RTE_ETH_FEC_MODE_CAPA_MASK(RS);
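
The final hunk only renames the ETH_SPEED_NUM_* constants inside the FEC capability table; which FEC modes are advertised per speed (RS at 100G, BASE-R at 50G, both at 25G, NOFEC everywhere) is unchanged driver policy. This table is what rte_eth_fec_get_capability() hands back, and the NULL-array probe the code above supports lets callers size the array first, as in this sketch:

#include <stdio.h>
#include <stdlib.h>
#include <rte_ethdev.h>

/* Sketch: call once with a NULL array for the count, then again to
 * fill and print the per-speed FEC capability masks. */
static void
dump_fec_caps(uint16_t port_id)
{
        struct rte_eth_fec_capa *capa;
        int num, i;

        num = rte_eth_fec_get_capability(port_id, NULL, 0);
        if (num <= 0)
                return;

        capa = calloc(num, sizeof(*capa));
        if (capa == NULL)
                return;

        if (rte_eth_fec_get_capability(port_id, capa, num) == num)
                for (i = 0; i < num; i++)
                        printf("%u Mbps: FEC capa mask 0x%x\n",
                               capa[i].speed, capa[i].capa);
        free(capa);
}
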