{
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
- int max_queues = adapter->sge.max_ethqsets / adapter->params.nports;
static const struct rte_eth_desc_lim cxgbe_desc_lim = {
.nb_max = CXGBE_MAX_RING_DESC_SIZE,
device_info->min_rx_bufsize = CXGBE_MIN_RX_BUFSIZE;
device_info->max_rx_pktlen = CXGBE_MAX_RX_PKTLEN;
- device_info->max_rx_queues = max_queues;
- device_info->max_tx_queues = max_queues;
+ device_info->max_rx_queues = adapter->sge.max_ethqsets;
+ device_info->max_tx_queues = adapter->sge.max_ethqsets;
device_info->max_mac_addrs = 1;
/* XXX: For now we support one MAC/port */
device_info->max_vfs = adapter->params.arch.vfcount;
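Because the fixed per-port division (max_ethqsets / nports) is gone, every port now advertises the full adapter-wide pool in dev_info, and the actual split is enforced later at configure time. A minimal caller-side sketch of how an application might consume these limits; port_id, desired_rxq, desired_txq, and port_conf are hypothetical names, not part of this patch:

    struct rte_eth_dev_info dev_info;
    uint16_t nb_rxq, nb_txq;

    /* The reported limits now reflect the whole adapter pool,
     * not an even per-port split.
     */
    if (rte_eth_dev_info_get(port_id, &dev_info) != 0)
        return -1;

    nb_rxq = RTE_MIN(desired_rxq, dev_info.max_rx_queues);
    nb_txq = RTE_MIN(desired_txq, dev_info.max_tx_queues);

    /* The PMD rejects counts that no longer fit once earlier ports
     * have claimed their share (see cxgbe_cfg_queue_count() below).
     */
    if (rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf) != 0)
        return -1;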
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
struct sge *s = &adapter->sge;
- struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset + queue_idx];
- int err = 0;
unsigned int temp_nb_desc;
+ struct sge_eth_txq *txq;
+ int err = 0;
+ txq = &s->ethtxq[pi->first_txqset + queue_idx];
- dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_qset = %u\n",
+ dev_debug(adapter, "%s: eth_dev->data->nb_tx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; pi->first_txqset = %u\n",
__func__, eth_dev->data->nb_tx_queues, queue_idx, nb_desc,
- socket_id, pi->first_qset);
+ socket_id, pi->first_txqset);
/* Free up the existing queue */
if (eth_dev->data->tx_queues[queue_idx]) {
const struct rte_eth_rxconf *rx_conf __rte_unused,
struct rte_mempool *mp)
{
+ unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adapter = pi->adapter;
+ struct rte_eth_dev_info dev_info;
struct sge *s = &adapter->sge;
- struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset + queue_idx];
- int err = 0;
- int msi_idx = 0;
unsigned int temp_nb_desc;
- struct rte_eth_dev_info dev_info;
- unsigned int pkt_len = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
+ int err = 0, msi_idx = 0;
+ struct sge_eth_rxq *rxq;
+ rxq = &s->ethrxq[pi->first_rxqset + queue_idx];
dev_debug(adapter, "%s: eth_dev->data->nb_rx_queues = %d; queue_idx = %d; nb_desc = %d; socket_id = %d; mp = %p\n",
__func__, eth_dev->data->nb_rx_queues, queue_idx, nb_desc,
socket_id, mp);
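Both queue-setup paths now index a contiguous per-port slice of the single adapter-wide array, with the slice base coming from the per-port first_rxqset/first_txqset cursors rather than a uniform first_qset stride. As an illustration of the resulting layout (the queue counts are hypothetical):

    /*
     * ethrxq[]: | port0: 4 rxqs | port1: 2 rxqs | unused |
     *             ^               ^
     *             port0           port1
     *             first_rxqset=0  first_rxqset=4
     *
     * Queue queue_idx of a port therefore lives at
     * ethrxq[pi->first_rxqset + queue_idx], and the Rx and Tx
     * slices of a port may now have different bases and sizes.
     */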
void cxgbe_dev_rx_queue_release(void *q)
{
struct sge_eth_rxq *rxq = (struct sge_eth_rxq *)q;
- struct sge_rspq *rq = &rxq->rspq;
- if (rq) {
+ if (rxq) {
struct port_info *pi = (struct port_info *)
- (rq->eth_dev->data->dev_private);
+ (rxq->rspq.eth_dev->data->dev_private);
struct adapter *adap = pi->adapter;
dev_debug(adapter, "%s: pi->port_id = %d; rx_queue_id = %d\n",
for (i = 0; i < pi->n_rx_qsets; i++) {
struct sge_eth_rxq *rxq =
- &s->ethrxq[pi->first_qset + i];
+ &s->ethrxq[pi->first_rxqset + i];
eth_stats->q_ipackets[i] = rxq->stats.pkts;
eth_stats->q_ibytes[i] = rxq->stats.rx_bytes;
for (i = 0; i < pi->n_tx_qsets; i++) {
struct sge_eth_txq *txq =
- &s->ethtxq[pi->first_qset + i];
+ &s->ethtxq[pi->first_txqset + i];
eth_stats->q_opackets[i] = txq->stats.pkts;
eth_stats->q_obytes[i] = txq->stats.tx_bytes;
cxgbe_stats_reset(pi);
for (i = 0; i < pi->n_rx_qsets; i++) {
struct sge_eth_rxq *rxq =
- &s->ethrxq[pi->first_qset + i];
+ &s->ethrxq[pi->first_rxqset + i];
rxq->stats.pkts = 0;
rxq->stats.rx_bytes = 0;
}
for (i = 0; i < pi->n_tx_qsets; i++) {
struct sge_eth_txq *txq =
- &s->ethtxq[pi->first_qset + i];
+ &s->ethtxq[pi->first_txqset + i];
txq->stats.pkts = 0;
txq->stats.tx_bytes = 0;
return 0;
}
-static inline bool is_x_1g_port(const struct link_config *lc)
-{
- return (lc->pcaps & FW_PORT_CAP32_SPEED_1G) != 0;
-}
-
-static inline bool is_x_10g_port(const struct link_config *lc)
-{
- unsigned int speeds, high_speeds;
-
- speeds = V_FW_PORT_CAP32_SPEED(G_FW_PORT_CAP32_SPEED(lc->pcaps));
- high_speeds = speeds &
- ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);
-
- return high_speeds != 0;
-}
-
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
int cxgbe_cfg_queue_count(struct rte_eth_dev *eth_dev)
{
- struct port_info *pi = eth_dev->data->dev_private;
+ struct port_info *temp_pi, *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
+ u16 first_txq = 0, first_rxq = 0;
struct sge *s = &adap->sge;
- unsigned int max_queues = s->max_ethqsets / adap->params.nports;
+ u16 i, max_rxqs, max_txqs;
+
+ max_rxqs = s->max_ethqsets;
+ max_txqs = s->max_ethqsets;
+ for_each_port(adap, i) {
+ temp_pi = adap2pinfo(adap, i);
+ if (i == pi->port_id)
+ break;
+
+ if (max_rxqs <= temp_pi->n_rx_qsets ||
+ max_txqs <= temp_pi->n_tx_qsets)
+ return -ENOMEM;
+
+ first_rxq += temp_pi->n_rx_qsets;
+ first_txq += temp_pi->n_tx_qsets;
+ max_rxqs -= temp_pi->n_rx_qsets;
+ max_txqs -= temp_pi->n_tx_qsets;
+ }
if ((eth_dev->data->nb_rx_queues < 1) ||
(eth_dev->data->nb_tx_queues < 1))
return -EINVAL;
- if ((eth_dev->data->nb_rx_queues > max_queues) ||
- (eth_dev->data->nb_tx_queues > max_queues))
- return -EINVAL;
-
- if (eth_dev->data->nb_rx_queues > pi->rss_size)
+ if (eth_dev->data->nb_rx_queues > max_rxqs ||
+ eth_dev->data->nb_tx_queues > max_txqs)
return -EINVAL;
/* We must configure RSS, since config has changed */
pi->n_rx_qsets = eth_dev->data->nb_rx_queues;
pi->n_tx_qsets = eth_dev->data->nb_tx_queues;
+ pi->first_rxqset = first_rxq;
+ pi->first_txqset = first_txq;
return 0;
}
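To make the accounting concrete, a worked example assuming a hypothetical adapter with max_ethqsets = 32, where port 0 has already been configured with 8 Rx and 8 Tx queues before port 1 reaches this function:

    /* for_each_port() walk, stopping at port 1 (i == pi->port_id):
     *   after port 0: first_rxq = 8,  first_txq = 8
     *                 max_rxqs  = 24, max_txqs  = 24
     * Port 1 therefore gets pi->first_rxqset = pi->first_txqset = 8
     * and may request 1..24 queues of each kind; anything larger
     * fails with -EINVAL, and if earlier ports had already consumed
     * the whole pool the loop bails out with -ENOMEM.
     */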
-void cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
+void cxgbe_cfg_queues_free(struct adapter *adap)
+{
+ if (adap->sge.ethtxq) {
+ rte_free(adap->sge.ethtxq);
+ adap->sge.ethtxq = NULL;
+ }
+
+ if (adap->sge.ethrxq) {
+ rte_free(adap->sge.ethrxq);
+ adap->sge.ethrxq = NULL;
+ }
+
+ adap->flags &= ~CFG_QUEUES;
+}
+
+int cxgbe_cfg_queues(struct rte_eth_dev *eth_dev)
{
struct port_info *pi = eth_dev->data->dev_private;
struct adapter *adap = pi->adapter;
struct sge *s = &adap->sge;
- unsigned int i, nb_ports = 0, qidx = 0;
- unsigned int q_per_port = 0;
+ u16 i;
if (!(adap->flags & CFG_QUEUES)) {
- for_each_port(adap, i) {
- struct port_info *tpi = adap2pinfo(adap, i);
-
- nb_ports += (is_x_10g_port(&tpi->link_cfg)) ||
- is_x_1g_port(&tpi->link_cfg) ? 1 : 0;
- }
-
- /*
- * We default up to # of cores queues per 1G/10G port.
- */
- if (nb_ports)
- q_per_port = (s->max_ethqsets -
- (adap->params.nports - nb_ports)) /
- nb_ports;
-
- if (q_per_port > rte_lcore_count())
- q_per_port = rte_lcore_count();
-
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
-
- pi->first_qset = qidx;
-
- /* Initially n_rx_qsets == n_tx_qsets */
- pi->n_rx_qsets = (is_x_10g_port(&pi->link_cfg) ||
- is_x_1g_port(&pi->link_cfg)) ?
- q_per_port : 1;
- pi->n_tx_qsets = pi->n_rx_qsets;
-
- if (pi->n_rx_qsets > pi->rss_size)
- pi->n_rx_qsets = pi->rss_size;
+ s->ethrxq = rte_calloc_socket(NULL, s->max_ethqsets,
+ sizeof(struct sge_eth_rxq), 0,
+ rte_socket_id());
+ if (!s->ethrxq)
+ return -ENOMEM;
- qidx += pi->n_rx_qsets;
+ s->ethtxq = rte_calloc_socket(NULL, s->max_ethqsets,
+ sizeof(struct sge_eth_txq), 0,
+ rte_socket_id());
+ if (!s->ethtxq) {
+ rte_free(s->ethrxq);
+ s->ethrxq = NULL;
+ return -ENOMEM;
}
- for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+ for (i = 0; i < s->max_ethqsets; i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
+ struct sge_eth_txq *t = &s->ethtxq[i];
init_rspq(adap, &r->rspq, 5, 32, 1024, 64);
r->usembufs = 1;
r->fl.size = (r->usembufs ? 1024 : 72);
- }
- for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
- s->ethtxq[i].q.size = 1024;
+ t->q.size = 1024;
+ }
init_rspq(adap, &adap->sge.fw_evtq, 0, 0, 1024, 64);
adap->flags |= CFG_QUEUES;
}
+
+ return 0;
}
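The queue arrays are now sized at runtime from max_ethqsets instead of being fixed MAX_ETH_QSETS-sized arrays, using the usual unwind-on-failure idiom: a failed second allocation frees the first, and cxgbe_cfg_queues_free() NULLs both pointers so a later teardown is idempotent. A standalone sketch of the same pattern; the function and parameter names are illustrative, not driver API:

    #include <errno.h>
    #include <rte_malloc.h>

    static int alloc_pair(void **rx, size_t rx_sz, void **tx, size_t tx_sz)
    {
        *rx = rte_zmalloc(NULL, rx_sz, 0);
        if (*rx == NULL)
            return -ENOMEM;

        *tx = rte_zmalloc(NULL, tx_sz, 0);
        if (*tx == NULL) {
            rte_free(*rx);  /* unwind the earlier allocation */
            *rx = NULL;     /* so a later free is a no-op */
            return -ENOMEM;
        }
        return 0;
    }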
void cxgbe_stats_get(struct port_info *pi, struct port_stats *stats)
/* Figure out how many Queue Sets we can support */
void cxgbe_configure_max_ethqsets(struct adapter *adapter)
{
- unsigned int ethqsets;
+ unsigned int ethqsets, reserved;
- /*
- * We need to reserve an Ingress Queue for the Asynchronous Firmware
- * Event Queue.
+ /* We need to reserve an Ingress Queue for the Asynchronous Firmware
+ * Event Queue and 1 Control Queue per port.
*
* For each Queue Set, we'll need the ability to allocate two Egress
* Contexts -- one for the Ingress Queue Free List and one for the TX
* Ethernet Queue.
*/
+ reserved = max(adapter->params.nports, 1);
if (is_pf4(adapter)) {
struct pf_resources *pfres = &adapter->params.pfres;
- ethqsets = pfres->niqflint - 1;
- if (pfres->neq < ethqsets * 2)
+ ethqsets = min(pfres->niqflint, pfres->nethctrl);
+ if (ethqsets > (pfres->neq / 2))
ethqsets = pfres->neq / 2;
} else {
struct vf_resources *vfres = &adapter->params.vfres;
- ethqsets = vfres->niqflint - 1;
- if (vfres->nethctrl != ethqsets)
- ethqsets = min(vfres->nethctrl, ethqsets);
- if (vfres->neq < ethqsets * 2)
+ ethqsets = min(vfres->niqflint, vfres->nethctrl);
+ if (ethqsets > (vfres->neq / 2))
ethqsets = vfres->neq / 2;
}
- if (ethqsets > MAX_ETH_QSETS)
- ethqsets = MAX_ETH_QSETS;
+ ethqsets -= reserved;
adapter->sge.max_ethqsets = ethqsets;
}
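A worked pass through the new math, with hypothetical PF resource counts for a 2-port adapter (niqflint = 66, nethctrl = 64, neq = 132):

    /* ethqsets = min(niqflint, nethctrl) = min(66, 64) = 64
     * neq / 2 = 66; 64 <= 66, so the two-egress-contexts-per-
     * queue-set clamp does not trigger.
     * reserved = max(nports, 1) = 2 (FW event queue and per-port
     * control queues)
     * max_ethqsets = 64 - 2 = 62
     */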
F_FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN |
F_FW_RSS_VI_CONFIG_CMD_UDPEN;
- rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rxq = &adapter->sge.ethrxq[pi->first_rxqset];
rss = rxq[0].rspq.abs_id;
/* If Tunnel All Lookup isn't specified in the global RSS
/* Should never be called before setting up sge eth rx queues */
BUG_ON(!(adapter->flags & FULL_INIT_DONE));
- rxq = &adapter->sge.ethrxq[pi->first_qset];
+ rxq = &adapter->sge.ethrxq[pi->first_rxqset];
rss = rte_zmalloc(NULL, pi->rss_size * sizeof(u16), 0);
if (!rss)
return -ENOMEM;
unsigned int i;
for (i = 0; i < pi->n_rx_qsets; i++)
- enable_rx(adap, &s->ethrxq[pi->first_qset + i].rspq);
+ enable_rx(adap, &s->ethrxq[pi->first_rxqset + i].rspq);
}
/**
adapter->flags &= ~FULL_INIT_DONE;
}
+ cxgbe_cfg_queues_free(adapter);
+
if (is_pf4(adapter) && (adapter->flags & FW_OK))
t4_fw_bye(adapter, adapter->mbox);
}
}
}
- cxgbe_cfg_queues(adapter->eth_dev);
+ err = cxgbe_cfg_queues(adapter->eth_dev);
+ if (err)
+ goto out_free;
cxgbe_print_adapter_info(adapter);
cxgbe_print_port_info(adapter);
return 0;
out_free:
+ cxgbe_cfg_queues_free(adapter);
+
for_each_port(adapter, i) {
pi = adap2pinfo(adapter, i);
if (pi->viid != 0)