/*
 *   BSD LICENSE
 *
 *   Copyright(c) 2017 Cavium, Inc.. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_ethdev.h>
#include <rte_ethdev_pci.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_alarm.h>

#include "lio_logs.h"
#include "lio_23xx_vf.h"
#include "lio_ethdev.h"
#include "lio_rxtx.h"

/* Default RSS key in use */
static uint8_t lio_rss_key[40] = {
	0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
	0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
	0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
	0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
	0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
};
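
/* lio_dev_rss_configure() below falls back to this 40-byte key whenever the
 * application does not supply an rss_conf.rss_key of its own.
 */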

static const struct rte_eth_desc_lim lio_rx_desc_lim = {
	.nb_max		= CN23XX_MAX_OQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_OQ_DESCRIPTORS,
	.nb_align	= 1,
};

static const struct rte_eth_desc_lim lio_tx_desc_lim = {
	.nb_max		= CN23XX_MAX_IQ_DESCRIPTORS,
	.nb_min		= CN23XX_MIN_IQ_DESCRIPTORS,
	.nb_align	= 1,
};

/* Wait for control command to reach nic. */
static uint16_t
lio_wait_for_ctrl_cmd(struct lio_device *lio_dev,
		      struct lio_dev_ctrl_cmd *ctrl_cmd)
{
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;

	while ((ctrl_cmd->cond == 0) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);
		rte_delay_ms(1);
	}

	return !timeout;
}
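
/* Completion protocol: the firmware response handler sets ctrl_cmd->cond
 * non-zero when the command has been acknowledged.  Until then we keep
 * flushing instruction queue 0 and sleeping 1 ms per iteration, for at most
 * LIO_MAX_CMD_TIMEOUT iterations, so the return value is non-zero exactly
 * when the command timed out.
 */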

/**
 * \brief Send Rx control command
 * @param eth_dev Pointer to the structure rte_eth_dev
 * @param start_stop whether to start or stop
 */
static int
lio_send_rx_ctrl_cmd(struct rte_eth_dev *eth_dev, int start_stop)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_RX_CTL;
	ctrl_pkt.ncmd.s.param1 = start_stop;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send RX Control message\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "RX Control command timed out\n");
		return -1;
	}

	return 0;
}

/* store statistics names and their offsets in the stats structure */
struct rte_lio_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

static const struct rte_lio_xstats_name_off rte_lio_stats_strings[] = {
	{"rx_pkts", offsetof(struct octeon_rx_stats, total_rcvd)},
	{"rx_bytes", offsetof(struct octeon_rx_stats, bytes_rcvd)},
	{"rx_broadcast_pkts", offsetof(struct octeon_rx_stats, total_bcst)},
	{"rx_multicast_pkts", offsetof(struct octeon_rx_stats, total_mcst)},
	{"rx_flow_ctrl_pkts", offsetof(struct octeon_rx_stats, ctl_rcvd)},
	{"rx_fifo_err", offsetof(struct octeon_rx_stats, fifo_err)},
	{"rx_dmac_drop", offsetof(struct octeon_rx_stats, dmac_drop)},
	{"rx_fcs_err", offsetof(struct octeon_rx_stats, fcs_err)},
	{"rx_jabber_err", offsetof(struct octeon_rx_stats, jabber_err)},
	{"rx_l2_err", offsetof(struct octeon_rx_stats, l2_err)},
	{"rx_vxlan_pkts", offsetof(struct octeon_rx_stats, fw_rx_vxlan)},
	{"rx_vxlan_err", offsetof(struct octeon_rx_stats, fw_rx_vxlan_err)},
	{"rx_lro_pkts", offsetof(struct octeon_rx_stats, fw_lro_pkts)},
	{"tx_pkts", (offsetof(struct octeon_tx_stats, total_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_bytes", (offsetof(struct octeon_tx_stats, total_bytes_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_broadcast_pkts",
		(offsetof(struct octeon_tx_stats, bcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_multicast_pkts",
		(offsetof(struct octeon_tx_stats, mcast_pkts_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_flow_ctrl_pkts", (offsetof(struct octeon_tx_stats, ctl_sent)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_fifo_err", (offsetof(struct octeon_tx_stats, fifo_err)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_total_collisions", (offsetof(struct octeon_tx_stats,
					  total_collisions)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_tso", (offsetof(struct octeon_tx_stats, fw_tso)) +
			sizeof(struct octeon_rx_stats)},
	{"tx_vxlan_pkts", (offsetof(struct octeon_tx_stats, fw_tx_vxlan)) +
			sizeof(struct octeon_rx_stats)},
};
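
/* The firmware reports rx and tx counters back-to-back inside one
 * struct octeon_link_stats, so every tx entry above biases its
 * offsetof(struct octeon_tx_stats, ...) by sizeof(struct octeon_rx_stats)
 * to index into the combined block.
 */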

#define LIO_NB_XSTATS	RTE_DIM(rte_lio_stats_strings)

/* Get hw stats of the port */
static int
lio_dev_xstats_get(struct rte_eth_dev *eth_dev, struct rte_eth_xstat *xstats,
		   unsigned int n)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct octeon_link_stats *hw_stats;
	struct lio_link_stats_resp *resp;
	struct lio_soft_command *sc;
	uint32_t resp_size;
	unsigned int i;
	int retval;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (n < LIO_NB_XSTATS)
		return LIO_NB_XSTATS;

	resp_size = sizeof(struct lio_link_stats_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_link_stats_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_PORT_STATS, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware. status: %x\n",
			    retval);
		goto get_stats_fail;
	}

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "failed to get port stats from firmware\n");
		goto get_stats_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->link_stats),
			 sizeof(struct octeon_link_stats) >> 3);

	hw_stats = &resp->link_stats;

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		xstats[i].id = i;
		xstats[i].value =
		    *(uint64_t *)(((char *)hw_stats) +
				  rte_lio_stats_strings[i].offset);
	}

	lio_free_soft_command(sc);

	return LIO_NB_XSTATS;

get_stats_fail:
	lio_free_soft_command(sc);

	return -1;
}
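
/* The stats response arrives as big-endian 8-byte words; lio_swap_8B_data()
 * byte-swaps the buffer in place so the offsets in rte_lio_stats_strings[]
 * can be read directly on little-endian hosts (see the "64-bit swap required
 * on LE machines" note in lio_dev_configure() below).
 */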

static int
lio_dev_xstats_get_names(struct rte_eth_dev *eth_dev,
			 struct rte_eth_xstat_name *xstats_names,
			 unsigned limit __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	unsigned int i;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (xstats_names == NULL)
		return LIO_NB_XSTATS;

	/* Note: limit checked in rte_eth_xstats_names() */

	for (i = 0; i < LIO_NB_XSTATS; i++) {
		snprintf(xstats_names[i].name, sizeof(xstats_names[i].name),
			 "%s", rte_lio_stats_strings[i].name);
	}

	return LIO_NB_XSTATS;
}

/* Reset hw stats for the port */
static void
lio_dev_xstats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down\n",
			    lio_dev->port_id);
		return;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CLEAR_STATS;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send clear stats command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Clear stats command timed out\n");
		return;
	}

	/* clear stored per queue stats */
	RTE_FUNC_PTR_OR_RET(*eth_dev->dev_ops->stats_reset);
	(*eth_dev->dev_ops->stats_reset)(eth_dev);
}

/* Retrieve the device statistics (# packets in/out, # bytes in/out, etc.) */
static void
lio_dev_stats_get(struct rte_eth_dev *eth_dev,
		  struct rte_eth_stats *stats)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;
	uint64_t bytes = 0;
	uint64_t pkts = 0;
	uint64_t drop = 0;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			pkts += iq_stats->tx_done;
			drop += iq_stats->tx_dropped;
			bytes += iq_stats->tx_tot_bytes;
		}
	}

	stats->opackets = pkts;
	stats->obytes = bytes;
	stats->oerrors = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			pkts += oq_stats->rx_pkts_received;
			drop += (oq_stats->rx_dropped +
				 oq_stats->dropped_toomany +
				 oq_stats->dropped_nomem);
			bytes += oq_stats->rx_bytes_received;
		}
	}

	stats->ibytes = bytes;
	stats->ipackets = pkts;
	stats->ierrors = drop;
}

static void
lio_dev_stats_reset(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_droq_stats *oq_stats;
	struct lio_iq_stats *iq_stats;
	struct lio_instr_queue *txq;
	struct lio_droq *droq;
	int i, iq_no, oq_no;

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		iq_no = lio_dev->linfo.txpciq[i].s.q_no;
		txq = lio_dev->instr_queue[iq_no];
		if (txq != NULL) {
			iq_stats = &txq->stats;
			memset(iq_stats, 0, sizeof(struct lio_iq_stats));
		}
	}

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		oq_no = lio_dev->linfo.rxpciq[i].s.q_no;
		droq = lio_dev->droq[oq_no];
		if (droq != NULL) {
			oq_stats = &droq->stats;
			memset(oq_stats, 0, sizeof(struct lio_droq_stats));
		}
	}
}

static void
lio_dev_info_get(struct rte_eth_dev *eth_dev,
		 struct rte_eth_dev_info *devinfo)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	devinfo->pci_dev = pci_dev;

	switch (pci_dev->id.subsystem_device_id) {
	/* CN23xx 10G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_210:
	case PCI_SUBSYS_DEV_ID_CN2360_210:
		devinfo->speed_capa = ETH_LINK_SPEED_10G;
		break;
	/* CN23xx 25G cards */
	case PCI_SUBSYS_DEV_ID_CN2350_225:
	case PCI_SUBSYS_DEV_ID_CN2360_225:
		devinfo->speed_capa = ETH_LINK_SPEED_25G;
		break;
	default:
		lio_dev_err(lio_dev,
			    "Unknown CN23XX subsystem device id. Not setting speed capability.\n");
	}

	devinfo->max_rx_queues = lio_dev->max_rx_queues;
	devinfo->max_tx_queues = lio_dev->max_tx_queues;

	devinfo->min_rx_bufsize = LIO_MIN_RX_BUF_SIZE;
	devinfo->max_rx_pktlen = LIO_MAX_RX_PKTLEN;

	devinfo->max_mac_addrs = 1;

	devinfo->rx_offload_capa = (DEV_RX_OFFLOAD_IPV4_CKSUM |
				    DEV_RX_OFFLOAD_UDP_CKSUM |
				    DEV_RX_OFFLOAD_TCP_CKSUM |
				    DEV_RX_OFFLOAD_VLAN_STRIP);
	devinfo->tx_offload_capa = (DEV_TX_OFFLOAD_IPV4_CKSUM |
				    DEV_TX_OFFLOAD_UDP_CKSUM |
				    DEV_TX_OFFLOAD_TCP_CKSUM |
				    DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM);

	devinfo->rx_desc_lim = lio_rx_desc_lim;
	devinfo->tx_desc_lim = lio_tx_desc_lim;

	devinfo->reta_size = LIO_RSS_MAX_TABLE_SZ;
	devinfo->hash_key_size = LIO_RSS_MAX_KEY_SZ;
	devinfo->flow_type_rss_offloads = (ETH_RSS_IPV4 |
					   ETH_RSS_NONFRAG_IPV4_TCP |
					   ETH_RSS_IPV6 |
					   ETH_RSS_NONFRAG_IPV6_TCP |
					   ETH_RSS_IPV6_EX |
					   ETH_RSS_IPV6_TCP_EX);
}

static int
lio_dev_validate_vf_mtu(struct rte_eth_dev *eth_dev, uint16_t new_mtu)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't check MTU\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* Limit the MTU to make sure the ethernet packets are between
	 * ETHER_MIN_MTU bytes and PF's MTU
	 */
	if ((new_mtu < ETHER_MIN_MTU) ||
	    (new_mtu > lio_dev->linfo.link.s.mtu)) {
		lio_dev_err(lio_dev, "Invalid MTU: %d\n", new_mtu);
		lio_dev_err(lio_dev, "Valid range %d and %d\n",
			    ETHER_MIN_MTU, lio_dev->linfo.link.s.mtu);
		return -EINVAL;
	}

	return 0;
}

static int
lio_dev_rss_reta_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_reta_entry64 *reta_conf,
			uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;
	int i, j, index;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update reta\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;
	rss_param->param.flags &= ~LIO_RSS_PARAM_ITABLE_UNCHANGED;
	rss_param->param.itablesize = LIO_RSS_MAX_TABLE_SZ;

	for (i = 0; i < (reta_size / RTE_RETA_GROUP_SIZE); i++) {
		for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) {
			if ((reta_conf[i].mask) & ((uint64_t)1 << j)) {
				index = (i * RTE_RETA_GROUP_SIZE) + j;
				rss_state->itable[index] = reta_conf[i].reta[j];
			}
		}
	}

	rss_state->itable_size = LIO_RSS_MAX_TABLE_SZ;
	memcpy(rss_param->itable, rss_state->itable, rss_state->itable_size);

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}
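
/* The RETA is delivered by the ethdev layer in groups of RTE_RETA_GROUP_SIZE
 * (64) entries; bit j of reta_conf[i].mask selects whether entry
 * i * 64 + j is to be updated, so untouched entries keep their previous
 * queue assignment in rss_state->itable.
 */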

static int
lio_dev_rss_reta_query(struct rte_eth_dev *eth_dev,
		       struct rte_eth_rss_reta_entry64 *reta_conf,
		       uint16_t reta_size)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	int i, num;

	if (reta_size != LIO_RSS_MAX_TABLE_SZ) {
		lio_dev_err(lio_dev,
			    "The size of hash lookup table configured (%d) doesn't match the number hardware can support (%d)\n",
			    reta_size, LIO_RSS_MAX_TABLE_SZ);
		return -EINVAL;
	}

	num = reta_size / RTE_RETA_GROUP_SIZE;

	for (i = 0; i < num; i++) {
		memcpy(reta_conf->reta,
		       &rss_state->itable[i * RTE_RETA_GROUP_SIZE],
		       RTE_RETA_GROUP_SIZE);
		reta_conf++;
	}

	return 0;
}

static int
lio_dev_rss_hash_conf_get(struct rte_eth_dev *eth_dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	uint8_t *hash_key = NULL;
	uint64_t rss_hf = 0;

	if (rss_state->hash_disable) {
		lio_dev_info(lio_dev, "RSS disabled in nic\n");
		rss_conf->rss_hf = 0;
		return 0;
	}

	/* Get key value */
	hash_key = rss_conf->rss_key;
	if (hash_key != NULL)
		memcpy(hash_key, rss_state->hash_key, rss_state->hash_key_size);

	if (rss_state->ip_hash)
		rss_hf |= ETH_RSS_IPV4;
	if (rss_state->tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
	if (rss_state->ipv6_hash)
		rss_hf |= ETH_RSS_IPV6;
	if (rss_state->ipv6_tcp_hash)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
	if (rss_state->ipv6_ex)
		rss_hf |= ETH_RSS_IPV6_EX;
	if (rss_state->ipv6_tcp_ex_hash)
		rss_hf |= ETH_RSS_IPV6_TCP_EX;

	rss_conf->rss_hf = rss_hf;

	return 0;
}

static int
lio_dev_rss_hash_update(struct rte_eth_dev *eth_dev,
			struct rte_eth_rss_conf *rss_conf)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct lio_rss_set *rss_param;
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't update hash\n",
			    lio_dev->port_id);
		return -EINVAL;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	rss_param = (struct lio_rss_set *)&ctrl_pkt.udd[0];

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_SET_RSS;
	ctrl_pkt.ncmd.s.more = sizeof(struct lio_rss_set) >> 3;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	rss_param->param.flags = 0xF;

	if (rss_conf->rss_key) {
		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_KEY_UNCHANGED;
		rss_state->hash_key_size = LIO_RSS_MAX_KEY_SZ;
		rss_param->param.hashkeysize = LIO_RSS_MAX_KEY_SZ;
		memcpy(rss_state->hash_key, rss_conf->rss_key,
		       rss_state->hash_key_size);
		memcpy(rss_param->key, rss_state->hash_key,
		       rss_state->hash_key_size);
	}

	if ((rss_conf->rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		/* Can't disable rss through hash flags,
		 * if it is enabled by default during init
		 */
		if (!rss_state->hash_disable)
			return -EINVAL;

		/* This is for --disable-rss during testpmd launch */
		rss_param->param.flags |= LIO_RSS_PARAM_DISABLE_RSS;
	} else {
		uint32_t hashinfo = 0;

		/* Can't enable rss if disabled by default during init */
		if (rss_state->hash_disable)
			return -EINVAL;

		if (rss_conf->rss_hf & ETH_RSS_IPV4) {
			hashinfo |= LIO_RSS_HASH_IPV4;
			rss_state->ip_hash = 1;
		} else {
			rss_state->ip_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV4;
			rss_state->tcp_hash = 1;
		} else {
			rss_state->tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6) {
			hashinfo |= LIO_RSS_HASH_IPV6;
			rss_state->ipv6_hash = 1;
		} else {
			rss_state->ipv6_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6;
			rss_state->ipv6_tcp_hash = 1;
		} else {
			rss_state->ipv6_tcp_hash = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_EX) {
			hashinfo |= LIO_RSS_HASH_IPV6_EX;
			rss_state->ipv6_ex = 1;
		} else {
			rss_state->ipv6_ex = 0;
		}

		if (rss_conf->rss_hf & ETH_RSS_IPV6_TCP_EX) {
			hashinfo |= LIO_RSS_HASH_TCP_IPV6_EX;
			rss_state->ipv6_tcp_ex_hash = 1;
		} else {
			rss_state->ipv6_tcp_ex_hash = 0;
		}

		rss_param->param.flags &= ~LIO_RSS_PARAM_HASH_INFO_UNCHANGED;
		rss_param->param.hashinfo = hashinfo;
	}

	lio_swap_8B_data((uint64_t *)rss_param, LIO_RSS_PARAM_SIZE >> 3);

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to set rss hash\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Set rss hash timed out\n");
		return -1;
	}

	return 0;
}

/**
 * Add vxlan dest udp port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  udp tunnel conf
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_add(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_ADD;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_ADD command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_ADD command timed out\n");
		return -1;
	}

	return 0;
}
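
/* For LIO_CMD_VXLAN_PORT_CONFIG the command fields are reused: param1
 * carries the UDP destination port and the "more" field carries the
 * LIO_CMD_VXLAN_PORT_ADD/DEL subcommand, as the add/del pair here shows.
 */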

/**
 * Remove vxlan dest udp port for an interface.
 *
 * @param eth_dev
 *  Pointer to the structure rte_eth_dev
 * @param udp_tnl
 *  udp tunnel conf
 *
 * @return
 *  On success return 0
 *  On failure return -1
 */
static int
lio_dev_udp_tunnel_del(struct rte_eth_dev *eth_dev,
		       struct rte_eth_udp_tunnel *udp_tnl)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (udp_tnl == NULL)
		return -EINVAL;

	if (udp_tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN) {
		lio_dev_err(lio_dev, "Unsupported tunnel type\n");
		return -1;
	}

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_VXLAN_PORT_CONFIG;
	ctrl_pkt.ncmd.s.param1 = udp_tnl->udp_port;
	ctrl_pkt.ncmd.s.more = LIO_CMD_VXLAN_PORT_DEL;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send VXLAN_PORT_DEL command\n");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "VXLAN_PORT_DEL command timed out\n");
		return -1;
	}

	return 0;
}

static int
lio_dev_vlan_filter_set(struct rte_eth_dev *eth_dev, uint16_t vlan_id, int on)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	if (lio_dev->linfo.vlan_is_admin_assigned)
		return -EPERM;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = on ?
			LIO_CMD_ADD_VLAN_FILTER : LIO_CMD_DEL_VLAN_FILTER;
	ctrl_pkt.ncmd.s.param1 = vlan_id;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to %s VLAN port\n",
			    on ? "add" : "remove");
		return -1;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd)) {
		lio_dev_err(lio_dev, "Command to %s VLAN port timed out\n",
			    on ? "add" : "remove");
		return -1;
	}

	return 0;
}

/**
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param eth_dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
lio_dev_atomic_write_link_status(struct rte_eth_dev *eth_dev,
				 struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &eth_dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static uint64_t
lio_hweight64(uint64_t w)
{
	uint64_t res = w - ((w >> 1) & 0x5555555555555555ul);

	res =
	    (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
	res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
	res = res + (res >> 8);
	res = res + (res >> 16);

	return (res + (res >> 32)) & 0x00000000000000FFul;
}
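
/* lio_hweight64() is the classic SWAR population count: the first step
 * leaves a 2-bit count per bit pair, the masked adds widen those to 4-bit
 * and then per-byte counts, and the remaining shift-adds accumulate all
 * byte counts into the low byte (e.g. w = 0xF0 yields 4).  It is used in
 * lio_dev_configure() below to count the queues the firmware grants in
 * the iqmask/oqmask bitmaps.
 */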

static int
lio_dev_link_update(struct rte_eth_dev *eth_dev,
		    int wait_to_complete __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_eth_link link, old;

	/* Initialize */
	link.link_status = ETH_LINK_DOWN;
	link.link_speed = ETH_SPEED_NUM_NONE;
	link.link_duplex = ETH_LINK_HALF_DUPLEX;
	memset(&old, 0, sizeof(old));

	/* Return what we found */
	if (lio_dev->linfo.link.s.link_up == 0) {
		/* Interface is down */
		if (lio_dev_atomic_write_link_status(eth_dev, &link))
			return -1;
		if (link.link_status == old.link_status)
			return -1;
		return 0;
	}

	link.link_status = ETH_LINK_UP; /* Interface is up */
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	switch (lio_dev->linfo.link.s.speed) {
	case LIO_LINK_SPEED_10000:
		link.link_speed = ETH_SPEED_NUM_10G;
		break;
	case LIO_LINK_SPEED_25000:
		link.link_speed = ETH_SPEED_NUM_25G;
		break;
	default:
		link.link_speed = ETH_SPEED_NUM_NONE;
		link.link_duplex = ETH_LINK_HALF_DUPLEX;
	}

	if (lio_dev_atomic_write_link_status(eth_dev, &link))
		return -1;

	if (link.link_status == old.link_status)
		return -1;

	return 0;
}
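
/* The cast to uint64_t in lio_dev_atomic_write_link_status() assumes that
 * struct rte_eth_link fits in 8 bytes, which is what lets a complete link
 * snapshot be published with a single 64-bit compare-and-set instead of a
 * lock.
 */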

/**
 * \brief Net device enable, disable allmulticast
 * @param eth_dev Pointer to the structure rte_eth_dev
 */
static void
lio_change_dev_flag(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	/* Create a ctrl pkt command to be sent to core app. */
	ctrl_pkt.ncmd.s.cmd = LIO_CMD_CHANGE_DEVFLAGS;
	ctrl_pkt.ncmd.s.param1 = lio_dev->ifflags;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send change flag message\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "Change dev flag command timed out\n");
}

static void
lio_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't enable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags |= LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_err(lio_dev, "Port %d down, can't disable multicast\n",
			    lio_dev->port_id);
		return;
	}

	lio_dev->ifflags &= ~LIO_IFFLAG_ALLMULTI;
	lio_change_dev_flag(eth_dev);
}

static void
lio_dev_rss_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_reta_entry64 reta_conf[8];
	struct rte_eth_rss_conf rss_conf;
	int i;

	/* Configure the RSS key and the RSS protocols used to compute
	 * the RSS hash of input packets.
	 */
	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	if ((rss_conf.rss_hf & LIO_RSS_OFFLOAD_ALL) == 0) {
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
		return;
	}

	if (rss_conf.rss_key == NULL)
		rss_conf.rss_key = lio_rss_key; /* Default hash key */

	lio_dev_rss_hash_update(eth_dev, &rss_conf);

	memset(reta_conf, 0, sizeof(reta_conf));
	for (i = 0; i < LIO_RSS_MAX_TABLE_SZ; i++) {
		uint8_t q_idx, conf_idx, reta_idx;

		q_idx = (uint8_t)((eth_dev->data->nb_rx_queues > 1) ?
				  i % eth_dev->data->nb_rx_queues : 0);
		conf_idx = i / RTE_RETA_GROUP_SIZE;
		reta_idx = i % RTE_RETA_GROUP_SIZE;
		reta_conf[conf_idx].reta[reta_idx] = q_idx;
		reta_conf[conf_idx].mask |= ((uint64_t)1 << reta_idx);
	}

	lio_dev_rss_reta_update(eth_dev, reta_conf, LIO_RSS_MAX_TABLE_SZ);
}
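
/* With the default RETA built above, entry i simply maps to queue
 * i % nb_rx_queues, spreading the LIO_RSS_MAX_TABLE_SZ indirection entries
 * round-robin across the configured Rx queues.
 */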

static void
lio_dev_mq_rx_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_rss_ctx *rss_state = &lio_dev->rss_state;
	struct rte_eth_rss_conf rss_conf;

	switch (eth_dev->data->dev_conf.rxmode.mq_mode) {
	case ETH_MQ_RX_RSS:
		lio_dev_rss_configure(eth_dev);
		break;
	case ETH_MQ_RX_NONE:
	/* if mq_mode is none, disable rss mode. */
	default:
		memset(&rss_conf, 0, sizeof(rss_conf));
		rss_state->hash_disable = 1;
		lio_dev_rss_hash_update(eth_dev, &rss_conf);
	}
}

/**
 * Setup our receive queue/ringbuffer. This is the
 * queue the Octeon uses to send us packets and
 * responses. We are given a memory pool for our
 * packet buffers that are used to populate the receive
 * queue.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 * @param q_no
 *    Queue number
 * @param num_rx_descs
 *    Number of entries in the queue
 * @param socket_id
 *    Where to allocate memory
 * @param rx_conf
 *    Pointer to the structure rte_eth_rxconf
 * @param mp
 *    Pointer to the packet pool
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -1
 */
static int
lio_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_rx_descs, unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf __rte_unused,
		       struct rte_mempool *mp)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct rte_pktmbuf_pool_private *mbp_priv;
	uint32_t fw_mapped_oq;
	uint16_t buf_size;

	if (q_no >= lio_dev->nb_rx_queues) {
		lio_dev_err(lio_dev, "Invalid rx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up rx queue %u\n", q_no);

	fw_mapped_oq = lio_dev->linfo.rxpciq[q_no].s.q_no;

	if ((lio_dev->droq[fw_mapped_oq]) &&
	    (num_rx_descs != lio_dev->droq[fw_mapped_oq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Rx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->droq[fw_mapped_oq]->max_count);
		return -ENOTSUP;
	}

	mbp_priv = rte_mempool_get_priv(mp);
	buf_size = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM;

	if (lio_setup_droq(lio_dev, fw_mapped_oq, num_rx_descs, buf_size, mp,
			   socket_id)) {
		lio_dev_err(lio_dev, "droq allocation failed\n");
		return -1;
	}

	eth_dev->data->rx_queues[q_no] = lio_dev->droq[fw_mapped_oq];

	return 0;
}

/**
 * Release the receive queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param rxq
 *    Opaque pointer to the receive queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_rx_queue_release(void *rxq)
{
	struct lio_droq *droq = rxq;
	int oq_no;

	if (droq) {
		/* Run time queue deletion not supported */
		if (droq->lio_dev->port_configured)
			return;

		oq_no = droq->q_no;
		lio_delete_droq_queue(droq->lio_dev, oq_no);
	}
}

/**
 * Allocate and initialize SW ring. Initialize associated HW registers.
 *
 * @param eth_dev
 *    Pointer to structure rte_eth_dev
 *
 * @param q_no
 *    Queue number
 *
 * @param num_tx_descs
 *    Number of ringbuffer descriptors
 *
 * @param socket_id
 *    NUMA socket id, used for memory allocations
 *
 * @param tx_conf
 *    Pointer to the structure rte_eth_txconf
 *
 * @return
 *    - On success, return 0
 *    - On failure, return -errno value
 */
static int
lio_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t q_no,
		       uint16_t num_tx_descs, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf __rte_unused)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	int fw_mapped_iq = lio_dev->linfo.txpciq[q_no].s.q_no;
	int retval;

	if (q_no >= lio_dev->nb_tx_queues) {
		lio_dev_err(lio_dev, "Invalid tx queue number %u\n", q_no);
		return -EINVAL;
	}

	lio_dev_dbg(lio_dev, "setting up tx queue %u\n", q_no);

	if ((lio_dev->instr_queue[fw_mapped_iq] != NULL) &&
	    (num_tx_descs != lio_dev->instr_queue[fw_mapped_iq]->max_count)) {
		lio_dev_err(lio_dev,
			    "Reconfiguring Tx descs not supported. Configure descs to same value %u or restart application\n",
			    lio_dev->instr_queue[fw_mapped_iq]->max_count);
		return -ENOTSUP;
	}

	retval = lio_setup_iq(lio_dev, q_no, lio_dev->linfo.txpciq[q_no],
			      num_tx_descs, lio_dev, socket_id);
	if (retval) {
		lio_dev_err(lio_dev, "Runtime IQ(TxQ) creation failed.\n");
		return retval;
	}

	retval = lio_setup_sglists(lio_dev, q_no, fw_mapped_iq,
				   lio_dev->instr_queue[fw_mapped_iq]->max_count,
				   socket_id);
	if (retval) {
		lio_delete_instruction_queue(lio_dev, fw_mapped_iq);
		return retval;
	}

	eth_dev->data->tx_queues[q_no] = lio_dev->instr_queue[fw_mapped_iq];

	return 0;
}

/**
 * Release the transmit queue/ringbuffer. Called by
 * the upper layers.
 *
 * @param txq
 *    Opaque pointer to the transmit queue to release
 *
 * @return
 *    - nothing
 */
static void
lio_dev_tx_queue_release(void *txq)
{
	struct lio_instr_queue *tq = txq;
	uint32_t fw_mapped_iq_no;

	if (tq) {
		/* Run time queue deletion not supported */
		if (tq->lio_dev->port_configured)
			return;

		/* Free sg_list */
		lio_delete_sglist(tq);

		fw_mapped_iq_no = tq->txpciq.s.q_no;
		lio_delete_instruction_queue(tq->lio_dev, fw_mapped_iq_no);
	}
}

/**
 * Api to check link state.
 */
static void
lio_dev_get_link_status(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	struct lio_link_status_resp *resp;
	union octeon_link_status *ls;
	struct lio_soft_command *sc;
	uint32_t resp_size;

	if (!lio_dev->intf_open)
		return;

	resp_size = sizeof(struct lio_link_status_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return;

	resp = (struct lio_link_status_resp *)sc->virtrptr;
	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_INFO, 0, 0, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	if (lio_send_soft_command(lio_dev, sc) == LIO_IQ_SEND_FAILED)
		goto get_status_fail;

	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		rte_delay_ms(1);
	}

	if (resp->status)
		goto get_status_fail;

	ls = &resp->link_info.link;

	lio_swap_8B_data((uint64_t *)ls, sizeof(union octeon_link_status) >> 3);

	if (lio_dev->linfo.link.link_status64 != ls->link_status64) {
		lio_dev->linfo.link.link_status64 = ls->link_status64;
		lio_dev_link_update(eth_dev, 0);
	}

	lio_free_soft_command(sc);

	return;

get_status_fail:
	lio_free_soft_command(sc);
}

/* This function will be invoked every LIO_LSC_TIMEOUT us (100ms)
 * and will update the link state if it changes.
 */
static void
lio_sync_link_state_check(void *eth_dev)
{
	struct lio_device *lio_dev =
		(((struct rte_eth_dev *)eth_dev)->data->dev_private);

	if (lio_dev->port_configured)
		lio_dev_get_link_status(eth_dev);

	/* Schedule periodic link status check.
	 * Stop the check if the interface is closed and start again when
	 * it opens.
	 */
	if (lio_dev->intf_open)
		rte_eal_alarm_set(LIO_LSC_TIMEOUT, lio_sync_link_state_check,
				  eth_dev);
}

static int
lio_dev_start(struct rte_eth_dev *eth_dev)
{
	uint16_t mtu = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int ret = 0;

	lio_dev_info(lio_dev, "Starting port %d\n", eth_dev->data->port_id);

	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		return -1;

	if (lio_send_rx_ctrl_cmd(eth_dev, 1))
		return -1;

	/* Ready for link status updates */
	lio_dev->intf_open = 1;
	rte_mb();

	/* Configure RSS if device configured with multiple RX queues. */
	lio_dev_mq_rx_configure(eth_dev);

	/* start polling for lsc */
	ret = rte_eal_alarm_set(LIO_LSC_TIMEOUT,
				lio_sync_link_state_check,
				eth_dev);
	if (ret) {
		lio_dev_err(lio_dev,
			    "link state check handler creation failed\n");
		goto dev_lsc_handle_error;
	}

	while ((lio_dev->linfo.link.link_status64 == 0) && (--timeout))
		rte_delay_ms(1);

	if (lio_dev->linfo.link.link_status64 == 0) {
		lio_dev_err(lio_dev, "link state timed out\n");
		ret = -1;
		goto dev_mtu_check_error;
	}

	if (lio_dev->linfo.link.s.mtu != mtu) {
		ret = lio_dev_validate_vf_mtu(eth_dev, mtu);
		if (ret)
			goto dev_mtu_check_error;
	}

	return 0;

dev_mtu_check_error:
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

dev_lsc_handle_error:
	lio_dev->intf_open = 0;
	lio_send_rx_ctrl_cmd(eth_dev, 0);

	return ret;
}

/* Stop device and disable input/output functions */
static void
lio_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	lio_dev_info(lio_dev, "Stopping port %d\n", eth_dev->data->port_id);
	lio_dev->intf_open = 0;
	rte_mb();

	/* Cancel callback if still running. */
	rte_eal_alarm_cancel(lio_sync_link_state_check, eth_dev);

	lio_send_rx_ctrl_cmd(eth_dev, 0);

	/* Clear recorded link status */
	lio_dev->linfo.link.link_status64 = 0;
}

static int
lio_dev_set_link_up(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already UP\n");
		return 0;
	}

	if (lio_send_rx_ctrl_cmd(eth_dev, 1)) {
		lio_dev_err(lio_dev, "Unable to set Link UP\n");
		return -1;
	}

	lio_dev->linfo.link.s.link_up = 1;
	eth_dev->data->dev_link.link_status = ETH_LINK_UP;

	return 0;
}

static int
lio_dev_set_link_down(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	if (!lio_dev->intf_open) {
		lio_dev_info(lio_dev, "Port is stopped, Start the port first\n");
		return 0;
	}

	if (!lio_dev->linfo.link.s.link_up) {
		lio_dev_info(lio_dev, "Link is already DOWN\n");
		return 0;
	}

	lio_dev->linfo.link.s.link_up = 0;
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;

	if (lio_send_rx_ctrl_cmd(eth_dev, 0)) {
		lio_dev->linfo.link.s.link_up = 1;
		eth_dev->data->dev_link.link_status = ETH_LINK_UP;
		lio_dev_err(lio_dev, "Unable to set Link Down\n");
		return -1;
	}

	return 0;
}

/**
 * Reset and stop the device. This occurs on the first
 * call to this routine. Subsequent calls will simply
 * return. NB: This will require the NIC to be rebooted.
 *
 * @param eth_dev
 *    Pointer to the structure rte_eth_dev
 *
 * @return
 *    - nothing
 */
static void
lio_dev_close(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint32_t i;

	lio_dev_info(lio_dev, "closing port %d\n", eth_dev->data->port_id);

	if (lio_dev->intf_open)
		lio_dev_stop(eth_dev);

	lio_wait_for_instr_fetch(lio_dev);

	lio_dev->fn_list.disable_io_queues(lio_dev);

	cn23xx_vf_set_io_queues_off(lio_dev);

	/* Reset iq regs (IQ_DBELL).
	 * Clear sli_pktx_cnts (OQ_PKTS_SENT).
	 */
	for (i = 0; i < lio_dev->nb_rx_queues; i++) {
		struct lio_droq *droq = lio_dev->droq[i];

		if (droq == NULL)
			break;

		uint32_t pkt_count = rte_read32(droq->pkts_sent_reg);

		lio_dev_dbg(lio_dev,
			    "pending oq count %u\n", pkt_count);
		rte_write32(pkt_count, droq->pkts_sent_reg);
	}

	/* Do FLR for the VF */
	cn23xx_vf_ask_pf_to_do_flr(lio_dev);

	/* lio_free_mbox */
	lio_dev->fn_list.free_mbox(lio_dev);

	/* Free glist resources */
	rte_free(lio_dev->glist_head);
	rte_free(lio_dev->glist_lock);
	lio_dev->glist_head = NULL;
	lio_dev->glist_lock = NULL;

	lio_dev->port_configured = 0;

	/* Delete all queues */
	lio_dev_clear_queues(eth_dev);
}

/**
 * Enable tunnel rx checksum verification from firmware.
 */
static void
lio_enable_hw_tunnel_rx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_RX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_RXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_RX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_RX_CSUM command timed out\n");
}

/**
 * Enable checksum calculation for inner packet in a tunnel.
 */
static void
lio_enable_hw_tunnel_tx_checksum(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	struct lio_dev_ctrl_cmd ctrl_cmd;
	struct lio_ctrl_pkt ctrl_pkt;

	/* flush added to prevent cmd failure
	 * in case the queue is full
	 */
	lio_flush_iq(lio_dev, lio_dev->instr_queue[0]);

	memset(&ctrl_pkt, 0, sizeof(struct lio_ctrl_pkt));
	memset(&ctrl_cmd, 0, sizeof(struct lio_dev_ctrl_cmd));

	ctrl_cmd.eth_dev = eth_dev;
	ctrl_cmd.cond = 0;

	ctrl_pkt.ncmd.s.cmd = LIO_CMD_TNL_TX_CSUM_CTL;
	ctrl_pkt.ncmd.s.param1 = LIO_CMD_TXCSUM_ENABLE;
	ctrl_pkt.ctrl_cmd = &ctrl_cmd;

	if (lio_send_ctrl_pkt(lio_dev, &ctrl_pkt)) {
		lio_dev_err(lio_dev, "Failed to send TNL_TX_CSUM command\n");
		return;
	}

	if (lio_wait_for_ctrl_cmd(lio_dev, &ctrl_cmd))
		lio_dev_err(lio_dev, "TNL_TX_CSUM command timed out\n");
}

static int lio_dev_configure(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);
	uint16_t timeout = LIO_MAX_CMD_TIMEOUT;
	int retval, num_iqueues, num_oqueues;
	uint8_t mac[ETHER_ADDR_LEN], i;
	struct lio_if_cfg_resp *resp;
	struct lio_soft_command *sc;
	union lio_if_cfg if_cfg;
	uint32_t resp_size;

	PMD_INIT_FUNC_TRACE();

	/* Re-configuring firmware not supported.
	 * Can't change tx/rx queues per port from initial value.
	 */
	if (lio_dev->port_configured) {
		if ((lio_dev->nb_rx_queues != eth_dev->data->nb_rx_queues) ||
		    (lio_dev->nb_tx_queues != eth_dev->data->nb_tx_queues)) {
			lio_dev_err(lio_dev,
				    "rxq/txq re-conf not supported. Restart application with new value.\n");
			return -ENOTSUP;
		}
		return 0;
	}

	lio_dev->nb_rx_queues = eth_dev->data->nb_rx_queues;
	lio_dev->nb_tx_queues = eth_dev->data->nb_tx_queues;

	resp_size = sizeof(struct lio_if_cfg_resp);
	sc = lio_alloc_soft_command(lio_dev, 0, resp_size, 0);
	if (sc == NULL)
		return -ENOMEM;

	resp = (struct lio_if_cfg_resp *)sc->virtrptr;

	/* Firmware doesn't have capability to reconfigure the queues,
	 * Claim all queues, and use as many required
	 */
	if_cfg.if_cfg64 = 0;
	if_cfg.s.num_iqueues = lio_dev->nb_tx_queues;
	if_cfg.s.num_oqueues = lio_dev->nb_rx_queues;
	if_cfg.s.base_queue = 0;

	if_cfg.s.gmx_port_id = lio_dev->pf_num;

	lio_prepare_soft_command(lio_dev, sc, LIO_OPCODE,
				 LIO_OPCODE_IF_CFG, 0,
				 if_cfg.if_cfg64, 0);

	/* Setting wait time in seconds */
	sc->wait_time = LIO_MAX_CMD_TIMEOUT / 1000;

	retval = lio_send_soft_command(lio_dev, sc);
	if (retval == LIO_IQ_SEND_FAILED) {
		lio_dev_err(lio_dev, "iq/oq config failed status: %x\n",
			    retval);
		/* Soft instr is freed by driver in case of failure. */
		goto nic_config_fail;
	}

	/* Sleep on a wait queue till the cond flag indicates that the
	 * response arrived or timed-out.
	 */
	while ((*sc->status_word == LIO_COMPLETION_WORD_INIT) && --timeout) {
		lio_flush_iq(lio_dev, lio_dev->instr_queue[sc->iq_no]);
		lio_process_ordered_list(lio_dev);
		rte_delay_ms(1);
	}

	retval = resp->status;
	if (retval) {
		lio_dev_err(lio_dev, "iq/oq config failed\n");
		goto nic_config_fail;
	}

	lio_swap_8B_data((uint64_t *)(&resp->cfg_info),
			 sizeof(struct octeon_if_cfg_info) >> 3);

	num_iqueues = lio_hweight64(resp->cfg_info.iqmask);
	num_oqueues = lio_hweight64(resp->cfg_info.oqmask);

	if (!(num_iqueues) || !(num_oqueues)) {
		lio_dev_err(lio_dev,
			    "Got bad iqueues (%016lx) or oqueues (%016lx) from firmware.\n",
			    (unsigned long)resp->cfg_info.iqmask,
			    (unsigned long)resp->cfg_info.oqmask);
		goto nic_config_fail;
	}

	lio_dev_dbg(lio_dev,
		    "interface %d, iqmask %016lx, oqmask %016lx, numiqueues %d, numoqueues %d\n",
		    eth_dev->data->port_id,
		    (unsigned long)resp->cfg_info.iqmask,
		    (unsigned long)resp->cfg_info.oqmask,
		    num_iqueues, num_oqueues);

	lio_dev->linfo.num_rxpciq = num_oqueues;
	lio_dev->linfo.num_txpciq = num_iqueues;

	for (i = 0; i < num_oqueues; i++) {
		lio_dev->linfo.rxpciq[i].rxpciq64 =
		    resp->cfg_info.linfo.rxpciq[i].rxpciq64;
		lio_dev_dbg(lio_dev, "index %d OQ %d\n",
			    i, lio_dev->linfo.rxpciq[i].s.q_no);
	}

	for (i = 0; i < num_iqueues; i++) {
		lio_dev->linfo.txpciq[i].txpciq64 =
		    resp->cfg_info.linfo.txpciq[i].txpciq64;
		lio_dev_dbg(lio_dev, "index %d IQ %d\n",
			    i, lio_dev->linfo.txpciq[i].s.q_no);
	}

	lio_dev->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
	lio_dev->linfo.gmxport = resp->cfg_info.linfo.gmxport;
	lio_dev->linfo.link.link_status64 =
		resp->cfg_info.linfo.link.link_status64;

	/* 64-bit swap required on LE machines */
	lio_swap_8B_data(&lio_dev->linfo.hw_addr, 1);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		mac[i] = *((uint8_t *)(((uint8_t *)&lio_dev->linfo.hw_addr) +
				       2 + i));

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)mac, &eth_dev->data->mac_addrs[0]);

	/* enable firmware checksum support for tunnel packets */
	lio_enable_hw_tunnel_rx_checksum(eth_dev);
	lio_enable_hw_tunnel_tx_checksum(eth_dev);

	lio_dev->glist_lock =
	    rte_zmalloc(NULL, sizeof(*lio_dev->glist_lock) * num_iqueues, 0);
	if (lio_dev->glist_lock == NULL)
		return -ENOMEM;

	lio_dev->glist_head =
		rte_zmalloc(NULL, sizeof(*lio_dev->glist_head) * num_iqueues,
			    0);
	if (lio_dev->glist_head == NULL) {
		rte_free(lio_dev->glist_lock);
		lio_dev->glist_lock = NULL;
		return -ENOMEM;
	}

	lio_dev_link_update(eth_dev, 0);

	lio_dev->port_configured = 1;

	lio_free_soft_command(sc);

	/* Disable iq_0 for reconf */
	lio_dev->fn_list.disable_io_queues(lio_dev);

	/* Reset ioq regs */
	lio_dev->fn_list.setup_device_regs(lio_dev);

	/* Free iq_0 used during init */
	lio_free_instr_queue0(lio_dev);

	return 0;

nic_config_fail:
	lio_dev_err(lio_dev, "Failed retval %d\n", retval);
	lio_free_soft_command(sc);
	lio_free_instr_queue0(lio_dev);

	return -ENODEV;
}

/* Define our ethernet definitions */
static const struct eth_dev_ops liovf_eth_dev_ops = {
	.dev_configure		= lio_dev_configure,
	.dev_start		= lio_dev_start,
	.dev_stop		= lio_dev_stop,
	.dev_set_link_up	= lio_dev_set_link_up,
	.dev_set_link_down	= lio_dev_set_link_down,
	.dev_close		= lio_dev_close,
	.allmulticast_enable	= lio_dev_allmulticast_enable,
	.allmulticast_disable	= lio_dev_allmulticast_disable,
	.link_update		= lio_dev_link_update,
	.stats_get		= lio_dev_stats_get,
	.xstats_get		= lio_dev_xstats_get,
	.xstats_get_names	= lio_dev_xstats_get_names,
	.stats_reset		= lio_dev_stats_reset,
	.xstats_reset		= lio_dev_xstats_reset,
	.dev_infos_get		= lio_dev_info_get,
	.vlan_filter_set	= lio_dev_vlan_filter_set,
	.rx_queue_setup		= lio_dev_rx_queue_setup,
	.rx_queue_release	= lio_dev_rx_queue_release,
	.tx_queue_setup		= lio_dev_tx_queue_setup,
	.tx_queue_release	= lio_dev_tx_queue_release,
	.reta_update		= lio_dev_rss_reta_update,
	.reta_query		= lio_dev_rss_reta_query,
	.rss_hash_conf_get	= lio_dev_rss_hash_conf_get,
	.rss_hash_update	= lio_dev_rss_hash_update,
	.udp_tunnel_port_add	= lio_dev_udp_tunnel_add,
	.udp_tunnel_port_del	= lio_dev_udp_tunnel_del,
};

static void
lio_check_pf_hs_response(void *lio_dev)
{
	struct lio_device *dev = lio_dev;

	/* check till response arrives */
	if (dev->pfvf_hsword.coproc_tics_per_us)
		return;

	cn23xx_vf_handle_mbox(dev);

	rte_eal_alarm_set(1, lio_check_pf_hs_response, lio_dev);
}
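
/* The poll above re-arms itself every microsecond via rte_eal_alarm_set()
 * and stops once the PF has filled in pfvf_hsword.coproc_tics_per_us, which
 * the driver treats as the sign that the handshake response has arrived.
 */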

/**
 * \brief Identify the LIO device and map the BAR address space
 * @param lio_dev lio device
 */
static int
lio_chip_specific_setup(struct lio_device *lio_dev)
{
	struct rte_pci_device *pdev = lio_dev->pci_dev;
	uint32_t dev_id = pdev->id.device_id;
	const char *s;
	int ret = 1;

	switch (dev_id) {
	case LIO_CN23XX_VF_VID:
		lio_dev->chip_id = LIO_CN23XX_VF_VID;
		ret = cn23xx_vf_setup_device(lio_dev);
		s = "CN23XX VF";
		break;
	default:
		s = "?";
		lio_dev_err(lio_dev, "Unsupported Chip\n");
	}

	if (!ret)
		lio_dev_info(lio_dev, "DEVICE : %s\n", s);

	return ret;
}

static int
lio_first_time_init(struct lio_device *lio_dev,
		    struct rte_pci_device *pdev)
{
	int dpdk_queues;

	PMD_INIT_FUNC_TRACE();

	/* set dpdk specific pci device pointer */
	lio_dev->pci_dev = pdev;

	/* Identify the LIO type and set device ops */
	if (lio_chip_specific_setup(lio_dev)) {
		lio_dev_err(lio_dev, "Chip specific setup failed\n");
		return -1;
	}

	/* Initialize soft command buffer pool */
	if (lio_setup_sc_buffer_pool(lio_dev)) {
		lio_dev_err(lio_dev, "sc buffer pool allocation failed\n");
		return -1;
	}

	/* Initialize lists to manage the requests of different types that
	 * arrive from applications for this lio device.
	 */
	lio_setup_response_list(lio_dev);

	if (lio_dev->fn_list.setup_mbox(lio_dev)) {
		lio_dev_err(lio_dev, "Mailbox setup failed\n");
		goto error;
	}

	/* Check PF response */
	lio_check_pf_hs_response((void *)lio_dev);

	/* Do handshake and exit if incompatible PF driver */
	if (cn23xx_pfvf_handshake(lio_dev))
		goto error;

	cn23xx_vf_ask_pf_to_do_flr(lio_dev);
	/* Wait for FLR for 100ms per SRIOV specification */
	rte_delay_ms(100);

	if (cn23xx_vf_set_io_queues_off(lio_dev)) {
		lio_dev_err(lio_dev, "Setting io queues off failed\n");
		goto error;
	}

	if (lio_dev->fn_list.setup_device_regs(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to configure device registers\n");
		goto error;
	}

	if (lio_setup_instr_queue0(lio_dev)) {
		lio_dev_err(lio_dev, "Failed to setup instruction queue 0\n");
		goto error;
	}

	dpdk_queues = (int)lio_dev->sriov_info.rings_per_vf;

	lio_dev->max_tx_queues = dpdk_queues;
	lio_dev->max_rx_queues = dpdk_queues;

	/* Enable input and output queues for this device */
	if (lio_dev->fn_list.enable_io_queues(lio_dev))
		goto error;

	return 0;

error:
	lio_free_sc_buffer_pool(lio_dev);
	if (lio_dev->mbox[0])
		lio_dev->fn_list.free_mbox(lio_dev);
	if (lio_dev->instr_queue[0])
		lio_free_instr_queue0(lio_dev);

	return -1;
}

static int
lio_eth_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -EPERM;

	/* lio_free_sc_buffer_pool */
	lio_free_sc_buffer_pool(lio_dev);

	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;

	return 0;
}

static int
lio_eth_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	struct lio_device *lio_dev = LIO_DEV(eth_dev);

	PMD_INIT_FUNC_TRACE();

	eth_dev->rx_pkt_burst = &lio_dev_recv_pkts;
	eth_dev->tx_pkt_burst = &lio_dev_xmit_pkts;

	/* Primary does the initialization. */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pdev);
	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;

	if (pdev->mem_resource[0].addr) {
		lio_dev->hw_addr = pdev->mem_resource[0].addr;
	} else {
		PMD_INIT_LOG(ERR, "ERROR: Failed to map BAR0\n");
		return -ENODEV;
	}

	lio_dev->eth_dev = eth_dev;
	/* set lio device print string */
	snprintf(lio_dev->dev_string, sizeof(lio_dev->dev_string),
		 "%s[%02x:%02x.%x]", pdev->driver->driver.name,
		 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);

	lio_dev->port_id = eth_dev->data->port_id;

	if (lio_first_time_init(lio_dev, pdev)) {
		lio_dev_err(lio_dev, "Device init failed\n");
		return -EINVAL;
	}

	eth_dev->dev_ops = &liovf_eth_dev_ops;
	eth_dev->data->mac_addrs = rte_zmalloc("lio", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		lio_dev_err(lio_dev,
			    "MAC addresses memory allocation failed\n");
		eth_dev->dev_ops = NULL;
		eth_dev->rx_pkt_burst = NULL;
		eth_dev->tx_pkt_burst = NULL;
		return -ENOMEM;
	}

	rte_atomic64_set(&lio_dev->status, LIO_DEV_RUNNING);

	lio_dev->port_configured = 0;
	/* Always allow unicast packets */
	lio_dev->ifflags |= LIO_IFFLAG_UNICAST;

	return 0;
}

static int
lio_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
		      struct rte_pci_device *pci_dev)
{
	struct rte_eth_dev *eth_dev;
	int ret;

	eth_dev = rte_eth_dev_pci_allocate(pci_dev,
					   sizeof(struct lio_device));
	if (eth_dev == NULL)
		return -ENOMEM;

	ret = lio_eth_dev_init(eth_dev);
	if (ret)
		rte_eth_dev_pci_release(eth_dev);

	return ret;
}

static int
lio_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev,
					      lio_eth_dev_uninit);
}

/* Set of PCI devices this driver supports */
static const struct rte_pci_id pci_id_liovf_map[] = {
	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_VF_VID) },
	{ .vendor_id = 0, /* sentinel */ }
};

static struct rte_pci_driver rte_liovf_pmd = {
	.id_table	= pci_id_liovf_map,
	.drv_flags	= RTE_PCI_DRV_NEED_MAPPING,
	.probe		= lio_eth_dev_pci_probe,
	.remove		= lio_eth_dev_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_liovf, rte_liovf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_liovf, pci_id_liovf_map);
RTE_PMD_REGISTER_KMOD_DEP(net_liovf, "* igb_uio | vfio-pci");